shipwright-cli 1.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +926 -0
- package/claude-code/CLAUDE.md.shipwright +125 -0
- package/claude-code/hooks/notify-idle.sh +35 -0
- package/claude-code/hooks/pre-compact-save.sh +57 -0
- package/claude-code/hooks/task-completed.sh +170 -0
- package/claude-code/hooks/teammate-idle.sh +68 -0
- package/claude-code/settings.json.template +184 -0
- package/completions/_shipwright +140 -0
- package/completions/shipwright.bash +89 -0
- package/completions/shipwright.fish +107 -0
- package/docs/KNOWN-ISSUES.md +199 -0
- package/docs/TIPS.md +331 -0
- package/docs/definition-of-done.example.md +16 -0
- package/docs/patterns/README.md +139 -0
- package/docs/patterns/audit-loop.md +149 -0
- package/docs/patterns/bug-hunt.md +183 -0
- package/docs/patterns/feature-implementation.md +159 -0
- package/docs/patterns/refactoring.md +183 -0
- package/docs/patterns/research-exploration.md +144 -0
- package/docs/patterns/test-generation.md +173 -0
- package/package.json +49 -0
- package/scripts/adapters/docker-deploy.sh +50 -0
- package/scripts/adapters/fly-deploy.sh +41 -0
- package/scripts/adapters/iterm2-adapter.sh +122 -0
- package/scripts/adapters/railway-deploy.sh +34 -0
- package/scripts/adapters/tmux-adapter.sh +87 -0
- package/scripts/adapters/vercel-deploy.sh +35 -0
- package/scripts/adapters/wezterm-adapter.sh +103 -0
- package/scripts/cct +242 -0
- package/scripts/cct-cleanup.sh +172 -0
- package/scripts/cct-cost.sh +590 -0
- package/scripts/cct-daemon.sh +3189 -0
- package/scripts/cct-doctor.sh +328 -0
- package/scripts/cct-fix.sh +478 -0
- package/scripts/cct-fleet.sh +904 -0
- package/scripts/cct-init.sh +282 -0
- package/scripts/cct-logs.sh +273 -0
- package/scripts/cct-loop.sh +1332 -0
- package/scripts/cct-memory.sh +1148 -0
- package/scripts/cct-pipeline.sh +3844 -0
- package/scripts/cct-prep.sh +1352 -0
- package/scripts/cct-ps.sh +168 -0
- package/scripts/cct-reaper.sh +390 -0
- package/scripts/cct-session.sh +284 -0
- package/scripts/cct-status.sh +169 -0
- package/scripts/cct-templates.sh +242 -0
- package/scripts/cct-upgrade.sh +422 -0
- package/scripts/cct-worktree.sh +405 -0
- package/scripts/postinstall.mjs +96 -0
- package/templates/pipelines/autonomous.json +71 -0
- package/templates/pipelines/cost-aware.json +95 -0
- package/templates/pipelines/deployed.json +79 -0
- package/templates/pipelines/enterprise.json +114 -0
- package/templates/pipelines/fast.json +63 -0
- package/templates/pipelines/full.json +104 -0
- package/templates/pipelines/hotfix.json +63 -0
- package/templates/pipelines/standard.json +91 -0
- package/tmux/claude-teams-overlay.conf +109 -0
- package/tmux/templates/architecture.json +19 -0
- package/tmux/templates/bug-fix.json +24 -0
- package/tmux/templates/code-review.json +24 -0
- package/tmux/templates/devops.json +19 -0
- package/tmux/templates/documentation.json +19 -0
- package/tmux/templates/exploration.json +19 -0
- package/tmux/templates/feature-dev.json +24 -0
- package/tmux/templates/full-stack.json +24 -0
- package/tmux/templates/migration.json +24 -0
- package/tmux/templates/refactor.json +19 -0
- package/tmux/templates/security-audit.json +24 -0
- package/tmux/templates/testing.json +24 -0
- package/tmux/tmux.conf +167 -0
package/docs/patterns/test-generation.md
ADDED

@@ -0,0 +1,173 @@
+# Pattern: Test Generation
+
+Build comprehensive test coverage using parallel agents that discover, generate, and validate tests iteratively.
+
+---
+
+## When to Use
+
+- **Coverage campaigns** — systematically adding tests to a poorly-tested codebase
+- **New feature testing** — generating unit + integration tests for freshly-built features
+- **Edge case hunting** — finding and testing boundary conditions, error paths, race conditions
+- **Test suite modernization** — upgrading old test patterns to current conventions
+
+**Don't use** for writing a single test file, or when coverage is already good and you just need one more test.
+
+---
+
+## Recommended Team Composition
+
+| Role | Agent Name | Focus |
+|------|-----------|-------|
+| **Team Lead** | `lead` | Orchestration, runs test suite, tracks coverage gaps |
+| **Test Writer 1** | `unit-tests` | Unit tests for core business logic |
+| **Test Writer 2** | `integration-tests` | Integration tests, API tests, cross-module tests |
+| **Test Writer 3** *(optional)* | `edge-cases` | Edge cases, error paths, boundary conditions |
+
+> **Tip:** For smaller projects, 2 agents (unit + integration) is enough. The team lead handles edge cases.
+
+---
+
+## Wave Breakdown
+
+### Wave 1: Discover
+
+**Goal:** Find what needs testing and understand existing test patterns.
+
+```
+┌──────────────────┬──────────────────┐
+│ Agent: scanner   │ Agent: patterns  │
+│ Find all testable│ Analyze existing │
+│ functions, map   │ test patterns,   │
+│ current coverage │ fixtures, mocks  │
+└──────────────────┴──────────────────┘
+        ↓ Team lead identifies coverage gaps
+```
+
+**Scanner agent:** "Run the test suite with coverage reporting. List all files/functions below the coverage threshold. Identify untested public API surface."
+
+**Patterns agent:** "Read existing test files. Document the patterns used — test runner, assertion style, mock strategy, fixture patterns. List file locations of good examples."
+
+### Wave 2: Generate (Parallel Batches)
+
+**Goal:** Write tests in parallel, partitioned by module or test type.
+
+```
+┌──────────────────┬──────────────────┬──────────────────┐
+│ Agent: unit-tests│ Agent: int-tests │ Agent: edge-cases│
+│ Unit tests for   │ Integration tests│ Edge cases for   │
+│ src/services/    │ for src/api/     │ auth + payments  │
+│ src/models/      │ routes           │ error paths      │
+└──────────────────┴──────────────────┴──────────────────┘
+        ↓ Team lead runs full test suite
+```
+
+**Critical:** Each agent writes tests in **different files**. Partition by directory or module:
+- Unit tests agent → `src/services/__tests__/`, `src/models/__tests__/`
+- Integration agent → `src/api/__tests__/`, `tests/integration/`
+- Edge cases agent → `tests/edge-cases/`
+
+### Wave 3: Validate & Fix
+
+**Goal:** Run all tests, fix failures, fill remaining gaps.
+
+```
+┌──────────────────┬──────────────────┐
+│ Agent: fixer-1   │ Agent: fixer-2   │
+│ Fix failing unit │ Fix failing      │
+│ tests from Wave 2│ integration tests│
+└──────────────────┴──────────────────┘
+        ↓ Team lead runs suite again
+```
+
+### Wave 4+: Iterate Until Green
+
+Repeat Wave 3 until:
+- All tests pass
+- Coverage meets the target threshold
+- No test failures remain
+
+> **Set a wave limit.** 5-6 waves is typical for test generation. If tests aren't passing after 6 waves, the issue is likely in the code under test, not the tests themselves.
+
+---
+
+## File-Based State Example
+
+`.claude/team-state.local.md`:
+
+```markdown
+---
+wave: 3
+status: in_progress
+goal: "Achieve 80% test coverage for src/api/ and src/services/"
+started_at: 2026-02-07T14:00:00Z
+---
+
+## Coverage Baseline
+- src/api/: 32% → target 80%
+- src/services/: 45% → target 80%
+- src/models/: 71% → target 80%
+
+## Completed
+- [x] Scanned coverage, identified 23 untested functions (wave-1-scanner.md)
+- [x] Documented test patterns: vitest, vi.mock(), factory fixtures (wave-1-patterns.md)
+- [x] Generated 14 unit tests for src/services/ (wave-2-unit.md)
+- [x] Generated 8 integration tests for src/api/ (wave-2-integration.md)
+- [x] Generated 6 edge case tests for auth flows (wave-2-edge.md)
+
+## In Progress (Wave 3)
+- [ ] Fix 3 failing unit tests (mock setup issues)
+- [ ] Fix 2 failing integration tests (missing test DB seed)
+
+## Coverage After Wave 2
+- src/api/: 32% → 61%
+- src/services/: 45% → 74%
+- src/models/: 71% → 78%
+
+## Agent Outputs
+- wave-1-scanner.md
+- wave-1-patterns.md
+- wave-2-unit.md
+- wave-2-integration.md
+- wave-2-edge.md
+```
+
+---
+
+## Example Commands
+
+```bash
+# Create a 3-agent test generation team
+shipwright session test-coverage
+
+# Between waves, run the test suite from the lead pane:
+# pnpm test --coverage
+
+# Watch agents writing tests in parallel across panes
+# Use prefix + Ctrl-t for status dashboard
+
+# After all waves complete
+shipwright cleanup --force
+```
+
+---
+
+## Tips
+
+- **Always discover patterns first (Wave 1).** Agents that don't know the existing test conventions will write tests that look nothing like the rest of the suite.
+- **Partition test files, not test cases.** Each agent should own entire test files, not individual test cases within a shared file. This prevents write conflicts.
+- **Run the test suite between every wave.** The team lead should run tests after each wave and feed failure details into the next wave's agent prompts.
+- **Include test file paths in prompts.** Don't say "write tests for the auth service." Say "write tests in `src/services/__tests__/auth.test.ts` for functions exported from `src/services/auth.ts`, using the vitest patterns shown in `src/services/__tests__/user.test.ts`."
+- **Wave 3+ agents need failure context.** Copy-paste the actual test failure output into the agent's prompt so it can fix the specific issue.
+
+---
+
+## Anti-Patterns
+
+| Don't | Why |
+|-------|-----|
+| Have two agents write to the same test file | File conflict — one agent's tests will be lost |
+| Skip running tests between waves | You'll compound errors across waves |
+| Generate tests without reading existing patterns | Tests will be stylistically inconsistent |
+| Set coverage target at 100% | Diminishing returns — edge case tests past 85% are often brittle |
+| Keep iterating past 6 waves | If tests still fail, the problem is in the code, not the tests |
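
The pattern above has the team lead re-run the suite between waves and feed the results back into `.claude/team-state.local.md`. A minimal sketch of that between-wave step, assuming the pnpm/vitest setup shown in the example state file; the log path and the appended section heading are illustrative, not something the package ships:

```bash
# Hypothetical between-wave check, run from the lead pane (not part of the package).
set -euo pipefail

wave=3
log=".claude/wave-${wave}-test-run.log"          # illustrative path
pnpm test --coverage 2>&1 | tee "$log" || true   # keep going even when tests fail

# Append a failure summary to the shared state file for the next wave's prompts
{
  echo ""
  echo "## Wave ${wave} test run"
  grep -E "FAIL|failed" "$log" || echo "- all tests passed"
} >> .claude/team-state.local.md
```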
package/package.json
ADDED

@@ -0,0 +1,49 @@
+{
+  "name": "shipwright-cli",
+  "version": "1.7.0",
+  "description": "Orchestrate autonomous Claude Code agent teams in tmux",
+  "bin": {
+    "shipwright": "./scripts/cct",
+    "sw": "./scripts/cct"
+  },
+  "files": [
+    "scripts/",
+    "!scripts/*-test.sh",
+    "!scripts/build-release.sh",
+    "!scripts/update-version.sh",
+    "!scripts/install-remote.sh",
+    "!scripts/install-completions.sh",
+    "templates/",
+    "tmux/templates/",
+    "tmux/tmux.conf",
+    "tmux/claude-teams-overlay.conf",
+    "claude-code/",
+    "completions/",
+    "docs/",
+    "LICENSE",
+    "README.md"
+  ],
+  "scripts": {
+    "postinstall": "node scripts/postinstall.mjs",
+    "test": "bash scripts/cct-pipeline-test.sh && bash scripts/cct-daemon-test.sh && bash scripts/cct-prep-test.sh && bash scripts/cct-fleet-test.sh && bash scripts/cct-fix-test.sh && bash scripts/cct-memory-test.sh"
+  },
+  "keywords": [
+    "claude",
+    "claude-code",
+    "tmux",
+    "agent-teams",
+    "autonomous",
+    "pipeline",
+    "ai"
+  ],
+  "author": "Seth Ford",
+  "license": "MIT",
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/sethdford/shipwright.git"
+  },
+  "homepage": "https://sethdford.github.io/shipwright",
+  "engines": {
+    "node": ">=20"
+  }
+}
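
For context on how the manifest above is consumed: the `bin` map exposes the same entry point as both `shipwright` and `sw`, and `engines` pins Node 20+. A hedged usage sketch, using only commands that appear elsewhere in this diff (the pattern doc and the adapter headers):

```bash
# Sketch only: install the published package globally and call it via either bin name.
npm install -g shipwright-cli       # "engines" requires Node >= 20

shipwright session test-coverage    # spawn an agent team (see docs/patterns/test-generation.md)
sw cleanup --force                  # same CLI through the short alias
```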

package/scripts/adapters/docker-deploy.sh
ADDED

@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+# ╔═══════════════════════════════════════════════════════════════════════════╗
+# ║ docker-deploy.sh — Deploy adapter for Docker / Docker Compose ║
+# ║ ║
+# ║ Sourced by shipwright init --deploy to generate platform-specific commands. ║
+# ║ Exports: staging_cmd, production_cmd, rollback_cmd, detect_platform ║
+# ╚═══════════════════════════════════════════════════════════════════════════╝
+
+adapter_name="docker"
+
+detect_platform() {
+  # Docker: look for Dockerfile or docker-compose.yml/yaml
+  [[ -f "Dockerfile" ]] || [[ -f "docker-compose.yml" ]] || [[ -f "docker-compose.yaml" ]]
+}
+
+get_staging_cmd() {
+  if [[ -f "docker-compose.yml" ]] || [[ -f "docker-compose.yaml" ]]; then
+    echo "docker compose build && docker compose up -d"
+  else
+    echo "docker build -t \$(basename \$(pwd)):staging . && docker run -d --name \$(basename \$(pwd))-staging \$(basename \$(pwd)):staging"
+  fi
+}
+
+get_production_cmd() {
+  if [[ -f "docker-compose.yml" ]] || [[ -f "docker-compose.yaml" ]]; then
+    echo "docker compose -f docker-compose.yml -f docker-compose.prod.yml build && docker compose -f docker-compose.yml -f docker-compose.prod.yml up -d"
+  else
+    echo "docker build -t \$(basename \$(pwd)):latest . && docker run -d --name \$(basename \$(pwd)) \$(basename \$(pwd)):latest"
+  fi
+}
+
+get_rollback_cmd() {
+  if [[ -f "docker-compose.yml" ]] || [[ -f "docker-compose.yaml" ]]; then
+    echo "docker compose down && docker compose up -d --force-recreate"
+  else
+    echo "docker stop \$(basename \$(pwd)) && docker rm \$(basename \$(pwd)) && docker run -d --name \$(basename \$(pwd)) \$(basename \$(pwd)):previous"
+  fi
+}
+
+get_health_url() {
+  echo "http://localhost:3000/health"
+}
+
+get_smoke_cmd() {
+  if [[ -f "docker-compose.yml" ]] || [[ -f "docker-compose.yaml" ]]; then
+    echo "docker compose ps --format json | jq -e 'all(.State == \"running\")'"
+  else
+    echo "docker inspect --format='{{.State.Running}}' \$(basename \$(pwd)) | grep -q true"
+  fi
+}
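
Every deploy adapter in this release exposes the same functions: `detect_platform`, `get_staging_cmd`, `get_production_cmd`, `get_rollback_cmd`, `get_health_url`, and `get_smoke_cmd`. How `shipwright init --deploy` actually wires them up is not visible in this diff, so the loop below is only a sketch of the contract as the adapter files define it:

```bash
#!/usr/bin/env bash
# Illustrative consumer of the deploy-adapter contract (not shipped in the package).
for adapter in scripts/adapters/*-deploy.sh; do
  # Each adapter defines adapter_name, detect_platform, and the get_*_cmd helpers.
  # shellcheck source=/dev/null
  source "$adapter"
  if detect_platform; then
    echo "detected platform: $adapter_name"
    echo "  staging:    $(get_staging_cmd)"
    echo "  production: $(get_production_cmd)"
    echo "  rollback:   $(get_rollback_cmd)"
    echo "  health URL: $(get_health_url)"
    echo "  smoke test: $(get_smoke_cmd)"
    break
  fi
done
```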

package/scripts/adapters/fly-deploy.sh
ADDED

@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+# ╔═══════════════════════════════════════════════════════════════════════════╗
+# ║ fly-deploy.sh — Deploy adapter for Fly.io ║
+# ║ ║
+# ║ Sourced by shipwright init --deploy to generate platform-specific commands. ║
+# ║ Exports: staging_cmd, production_cmd, rollback_cmd, detect_platform ║
+# ╚═══════════════════════════════════════════════════════════════════════════╝
+
+adapter_name="fly"
+
+detect_platform() {
+  # Fly.io: look for fly.toml
+  [[ -f "fly.toml" ]]
+}
+
+get_staging_cmd() {
+  echo "fly deploy --strategy canary --wait-timeout 120"
+}
+
+get_production_cmd() {
+  echo "fly deploy --strategy rolling --wait-timeout 300"
+}
+
+get_rollback_cmd() {
+  echo "fly releases list --json | jq -r '.[1].version' | xargs -I{} fly deploy --image-ref {}"
+}
+
+get_health_url() {
+  # Extract app name from fly.toml for health URL
+  local app_name
+  app_name=$(grep '^app\s*=' fly.toml 2>/dev/null | head -1 | sed 's/.*=\s*"\?\([^"]*\)"\?/\1/' | tr -d ' ')
+  if [[ -n "$app_name" ]]; then
+    echo "https://${app_name}.fly.dev/health"
+  else
+    echo ""
+  fi
+}
+
+get_smoke_cmd() {
+  echo "fly status --json | jq -e '.Status == \"running\"'"
+}
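
The Fly adapter's `get_health_url` derives the app name by grepping `fly.toml`. A quick way to see what that pipeline yields for a given config; the app name below is made up:

```bash
# Illustrative check of the fly.toml parsing used by get_health_url above.
# The grep/sed/tr pipeline mirrors the adapter and assumes GNU sed (\s and \? syntax).
cat > /tmp/fly.toml <<'EOF'
app = "my-shipwright-demo"
primary_region = "iad"
EOF

grep '^app\s*=' /tmp/fly.toml | head -1 | sed 's/.*=\s*"\?\([^"]*\)"\?/\1/' | tr -d ' '
# → my-shipwright-demo, so the health URL would be https://my-shipwright-demo.fly.dev/health
```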

package/scripts/adapters/iterm2-adapter.sh
ADDED

@@ -0,0 +1,122 @@
+#!/usr/bin/env bash
+# ╔═══════════════════════════════════════════════════════════════════════════╗
+# ║ iterm2-adapter.sh — Terminal adapter for iTerm2 tab management ║
+# ║ ║
+# ║ Uses AppleScript (osascript) to create iTerm2 tabs with named titles ║
+# ║ and working directories. macOS only. ║
+# ║ Sourced by cct-session.sh — exports: spawn_agent, list_agents, ║
+# ║ kill_agent, focus_agent. ║
+# ╚═══════════════════════════════════════════════════════════════════════════╝
+
+# Verify we're on macOS and iTerm2 is available
+if [[ "$(uname)" != "Darwin" ]]; then
+  echo -e "\033[38;2;248;113;113m\033[1m✗\033[0m iTerm2 adapter requires macOS." >&2
+  exit 1
+fi
+
+# Track created tab IDs for agent management
+declare -a _ITERM2_TAB_NAMES=()
+
+spawn_agent() {
+  local name="$1"
+  local working_dir="${2:-$PWD}"
+  local command="${3:-}"
+
+  # Resolve working_dir — tmux format #{pane_current_path} won't work here
+  if [[ "$working_dir" == *"pane_current_path"* || "$working_dir" == "." ]]; then
+    working_dir="$PWD"
+  fi
+
+  osascript <<APPLESCRIPT
+tell application "iTerm2"
+  tell current window
+    -- Create a new tab
+    set newTab to (create tab with default profile)
+    tell current session of newTab
+      -- Set the name/title
+      set name to "${name}"
+      -- Change to the working directory
+      write text "cd '${working_dir}' && clear"
+    end tell
+  end tell
+end tell
+APPLESCRIPT
+
+  # Run the command if provided
+  if [[ -n "$command" ]]; then
+    sleep 0.3
+    osascript <<APPLESCRIPT
+tell application "iTerm2"
+  tell current window
+    tell current session of current tab
+      write text "${command}"
+    end tell
+  end tell
+end tell
+APPLESCRIPT
+  fi
+
+  _ITERM2_TAB_NAMES+=("$name")
+}
+
+list_agents() {
+  # List all tabs in the current iTerm2 window
+  osascript <<'APPLESCRIPT'
+tell application "iTerm2"
+  tell current window
+    set output to ""
+    set tabIndex to 0
+    repeat with aTab in tabs
+      set tabIndex to tabIndex + 1
+      tell current session of aTab
+        set sessionName to name
+        set output to output & tabIndex & ": " & sessionName & linefeed
+      end tell
+    end repeat
+    return output
+  end tell
+end tell
+APPLESCRIPT
+}
+
+kill_agent() {
+  local name="$1"
+
+  osascript <<APPLESCRIPT
+tell application "iTerm2"
+  tell current window
+    repeat with aTab in tabs
+      tell current session of aTab
+        if name is "${name}" then
+          close
+          return "closed"
+        end if
+      end tell
+    end repeat
+  end tell
+end tell
+return "not found"
+APPLESCRIPT
+}
+
+focus_agent() {
+  local name="$1"
+
+  osascript <<APPLESCRIPT
+tell application "iTerm2"
+  tell current window
+    set tabIndex to 1
+    repeat with aTab in tabs
+      tell current session of aTab
+        if name is "${name}" then
+          select aTab
+          return "focused"
+        end if
+      end tell
+      set tabIndex to tabIndex + 1
+    end repeat
+  end tell
+end tell
+return "not found"
+APPLESCRIPT
+}

package/scripts/adapters/railway-deploy.sh
ADDED

@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+# ╔═══════════════════════════════════════════════════════════════════════════╗
+# ║ railway-deploy.sh — Deploy adapter for Railway ║
+# ║ ║
+# ║ Sourced by shipwright init --deploy to generate platform-specific commands. ║
+# ║ Exports: staging_cmd, production_cmd, rollback_cmd, detect_platform ║
+# ╚═══════════════════════════════════════════════════════════════════════════╝
+
+adapter_name="railway"
+
+detect_platform() {
+  # Railway: look for railway.toml or .railway/ directory
+  [[ -f "railway.toml" ]] || [[ -d ".railway" ]] || [[ -f "railway.json" ]]
+}
+
+get_staging_cmd() {
+  echo "railway up --environment staging --detach"
+}
+
+get_production_cmd() {
+  echo "railway up --environment production --detach"
+}
+
+get_rollback_cmd() {
+  echo "railway rollback --yes"
+}
+
+get_health_url() {
+  echo ""
+}
+
+get_smoke_cmd() {
+  echo "railway status --json 2>/dev/null | jq -e '.status == \"running\"'"
+}

package/scripts/adapters/tmux-adapter.sh
ADDED

@@ -0,0 +1,87 @@
+#!/usr/bin/env bash
+# ╔═══════════════════════════════════════════════════════════════════════════╗
+# ║ tmux-adapter.sh — Terminal adapter for tmux pane management ║
+# ║ ║
+# ║ Default adapter. Creates tmux panes within a named window. ║
+# ║ Sourced by cct-session.sh — exports: spawn_agent, list_agents, ║
+# ║ kill_agent, focus_agent. ║
+# ╚═══════════════════════════════════════════════════════════════════════════╝
+
+# Track spawned panes by agent name
+declare -A _TMUX_AGENT_PANES
+
+spawn_agent() {
+  local name="$1"
+  local working_dir="${2:-#{pane_current_path}}"
+  local command="${3:-}"
+
+  # If no window exists yet, create one
+  if ! tmux list-windows -F '#W' 2>/dev/null | grep -qx "$WINDOW_NAME"; then
+    tmux new-window -n "$WINDOW_NAME" -c "$working_dir"
+  else
+    # Split the current window to add a pane
+    tmux split-window -t "$WINDOW_NAME" -c "$working_dir"
+  fi
+
+  sleep 0.1
+
+  # Set the pane title
+  tmux send-keys -t "$WINDOW_NAME" "printf '\\033]2;${name}\\033\\\\'" Enter
+  sleep 0.1
+  tmux send-keys -t "$WINDOW_NAME" "clear" Enter
+
+  # Run the command if provided
+  if [[ -n "$command" ]]; then
+    sleep 0.1
+    tmux send-keys -t "$WINDOW_NAME" "$command" Enter
+  fi
+
+  # Re-tile after adding each pane
+  tmux select-layout -t "$WINDOW_NAME" tiled 2>/dev/null || true
+}
+
+list_agents() {
+  # List all panes in the window with their titles
+  if tmux list-windows -F '#W' 2>/dev/null | grep -qx "$WINDOW_NAME"; then
+    tmux list-panes -t "$WINDOW_NAME" -F '#{pane_index}: #{pane_title} (#{pane_current_command})' 2>/dev/null
+  fi
+}
+
+kill_agent() {
+  local name="$1"
+
+  if ! tmux list-windows -F '#W' 2>/dev/null | grep -qx "$WINDOW_NAME"; then
+    return 1
+  fi
+
+  # Find the pane with the matching title
+  local pane_id
+  pane_id=$(tmux list-panes -t "$WINDOW_NAME" -F '#{pane_id} #{pane_title}' 2>/dev/null \
+    | grep " ${name}$" | head -1 | cut -d' ' -f1)
+
+  if [[ -n "$pane_id" ]]; then
+    tmux kill-pane -t "$pane_id"
+    return 0
+  fi
+  return 1
+}
+
+focus_agent() {
+  local name="$1"
+
+  if ! tmux list-windows -F '#W' 2>/dev/null | grep -qx "$WINDOW_NAME"; then
+    return 1
+  fi
+
+  # Find the pane with the matching title
+  local pane_index
+  pane_index=$(tmux list-panes -t "$WINDOW_NAME" -F '#{pane_index} #{pane_title}' 2>/dev/null \
+    | grep " ${name}$" | head -1 | cut -d' ' -f1)
+
+  if [[ -n "$pane_index" ]]; then
+    tmux select-window -t "$WINDOW_NAME"
+    tmux select-pane -t "$WINDOW_NAME.$pane_index"
+    return 0
+  fi
+  return 1
+}
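
Both terminal adapters (the tmux one above and the iTerm2 one earlier) export the same four functions and read the window name from `WINDOW_NAME`, which the caller is expected to set. A minimal sketch of how a caller such as cct-session.sh might drive the tmux adapter; the window name and the `claude` command are assumptions for illustration:

```bash
#!/usr/bin/env bash
# Illustrative driver for the terminal-adapter contract (not shipped in the package).
# Must be run from inside an existing tmux session.
WINDOW_NAME="claude-team"                 # hypothetical; the adapter reads this variable
# shellcheck source=/dev/null
source scripts/adapters/tmux-adapter.sh

spawn_agent "lead"       "$PWD" "claude"  # third argument (the command) is an assumption
spawn_agent "unit-tests" "$PWD" "claude"
list_agents                               # prints pane index, title, current command
focus_agent "lead"
kill_agent  "unit-tests"
```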

package/scripts/adapters/vercel-deploy.sh
ADDED

@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+# ╔═══════════════════════════════════════════════════════════════════════════╗
+# ║ vercel-deploy.sh — Deploy adapter for Vercel ║
+# ║ ║
+# ║ Sourced by shipwright init --deploy to generate platform-specific commands. ║
+# ║ Exports: staging_cmd, production_cmd, rollback_cmd, detect_platform ║
+# ╚═══════════════════════════════════════════════════════════════════════════╝
+
+adapter_name="vercel"
+
+detect_platform() {
+  # Vercel: look for vercel.json or .vercel/ directory
+  [[ -f "vercel.json" ]] || [[ -d ".vercel" ]]
+}
+
+get_staging_cmd() {
+  echo "vercel deploy --yes 2>&1 | tee .claude/pipeline-artifacts/deploy-staging.log"
+}
+
+get_production_cmd() {
+  echo "vercel deploy --prod --yes 2>&1 | tee .claude/pipeline-artifacts/deploy-prod.log"
+}
+
+get_rollback_cmd() {
+  echo "vercel rollback --yes"
+}
+
+get_health_url() {
+  # Vercel provides a preview URL from the deploy output
+  echo ""
+}
+
+get_smoke_cmd() {
+  echo "curl -sf \$(vercel ls --json 2>/dev/null | jq -r '.[0].url // empty') > /dev/null"
+}