@opensassi/opencode 0.1.2 → 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40) hide show
  1. package/dashboard/dashboard.e2e.test.ts +247 -0
  2. package/dashboard/dist/index.d.ts +9 -0
  3. package/dashboard/dist/index.js +36 -0
  4. package/dashboard/dist/routes/api.d.ts +2 -0
  5. package/dashboard/dist/routes/api.js +215 -0
  6. package/dashboard/dist/services/cache.d.ts +13 -0
  7. package/dashboard/dist/services/cache.js +29 -0
  8. package/dashboard/dist/services/experiments.d.ts +11 -0
  9. package/dashboard/dist/services/experiments.js +108 -0
  10. package/dashboard/dist/services/git.d.ts +12 -0
  11. package/dashboard/dist/services/git.js +149 -0
  12. package/dashboard/dist/services/sessions.d.ts +25 -0
  13. package/dashboard/dist/services/sessions.js +208 -0
  14. package/dashboard/dist/services/specs.d.ts +9 -0
  15. package/dashboard/dist/services/specs.js +102 -0
  16. package/dashboard/dist/types.d.ts +173 -0
  17. package/dashboard/dist/types.js +1 -0
  18. package/dashboard/opencode.e2e.test.ts +100 -0
  19. package/dashboard/playwright.config.ts +11 -0
  20. package/dashboard/public/app.js +961 -0
  21. package/dashboard/public/index.html +29 -0
  22. package/dashboard/public/style.css +231 -0
  23. package/dashboard/src/index.ts +53 -0
  24. package/dashboard/src/routes/api.ts +235 -0
  25. package/dashboard/src/services/cache.ts +38 -0
  26. package/dashboard/src/services/experiments.ts +117 -0
  27. package/dashboard/src/services/git.ts +139 -0
  28. package/dashboard/src/services/sessions.ts +216 -0
  29. package/dashboard/src/services/specs.ts +95 -0
  30. package/dashboard/src/types.ts +168 -0
  31. package/dashboard/technical-specification.md +414 -0
  32. package/dashboard/test-api.sh +127 -0
  33. package/dashboard/tsconfig.json +16 -0
  34. package/lib/util/paths.js +9 -1
  35. package/package.json +9 -1
  36. package/scripts/dashboard.js +17 -0
  37. package/scripts/generate-daily-summaries.js +190 -0
  38. package/skills/opensassi/SKILL.md +150 -56
  39. package/skills/todo/SKILL.md +45 -63
  40. package/skills-index.json +10 -7
@@ -0,0 +1,190 @@
1
+ #!/usr/bin/env node
2
+ import { readFileSync, readdirSync, writeFileSync, existsSync, mkdirSync } from 'node:fs';
3
+ import { join, resolve, basename } from 'node:path';
4
+ import { execFileSync } from 'node:child_process';
5
+
6
+ const SESSIONS_DIR = resolve(process.cwd(), 'sessions');
7
+
8
function parseMdField(content, label) {
  // Capture the text following a bold "**<label>:**" marker, stopping at a
  // blank line, the next bold field, or end of input. Returns the trimmed
  // value, or null when the field is absent.
  const pattern = new RegExp(
    `\\*\\*${label}:\\*\\*\\s*(.+?)(?:\\n\\n|\\n(?=\\*\\*)|$)`,
    's',
  );
  const match = content.match(pattern);
  if (match === null) return null;
  return match[1].trim();
}
13
+
14
function parseDurationMinutes(content) {
  // Read "**Duration:** N min" from the evaluation markdown. Returns 0 when
  // absent so callers can fall back to the archive-derived duration.
  const match = content.match(/\*\*Duration:\*\*\s*(\d+)\s*min/);
  return match ? Number.parseInt(match[1], 10) : 0;
}
22
+
23
function parsePrompterMinutes(content) {
  // Prompter time in minutes: prefer the "**Total:** X hours" field, then a
  // "prompter active ≈ X hours" phrase, else assume half an hour.
  const total = content.match(/\*\*Total:\*\*\s*([\d.]+)\s*hours?/);
  if (total) return Math.round(Number.parseFloat(total[1]) * 60);
  const active = content.match(/prompter active\s*[≈~]?\s*([\d.]+)\s*hours?/);
  if (active) return Math.round(Number.parseFloat(active[1]) * 60);
  return 30;
}
31
+
32
function parseSmeHours(content) {
  // SME-equivalent time from the "**Model-Equivalent SME Time Estimate:**"
  // field (optionally prefixed with ~ or ≈). Despite the name, the return
  // value is in MINUTES; defaults to 60 when the field is missing.
  const sme = content.match(
    /\*\*Model-Equivalent SME Time Estimate:\*\*\s*(?:~|≈)?\s*([\d.]+)\s*hours?/,
  );
  return sme ? Math.round(Number.parseFloat(sme[1]) * 60) : 60;
}
37
+
38
function parseTags(content) {
  // Pull the comma-separated list after "**Aggregation Tags:**".
  // BUG FIX: the previous pattern used `\\n\\n` inside a regex LITERAL,
  // which matches the literal characters backslash-n backslash-n rather
  // than two newlines; combined with the /s flag the lazy capture ran to
  // end-of-content, so the last tag swallowed the rest of the document.
  // `\n\n` is the correct terminator (cf. parseMdField, which builds the
  // equivalent pattern from a string via `new RegExp`).
  const tagMatch = content.match(/\*\*Aggregation Tags:\*\*\s*(.+?)(?:\n\n|$)/s);
  if (tagMatch) {
    return tagMatch[1].split(',').map(t => t.trim()).filter(Boolean);
  }
  return [];
}
45
+
46
function parseSessionId(content) {
  // "**Session ID:**" field from the evaluation markdown, or '' when absent.
  const value = parseMdField(content, 'Session ID');
  return value || '';
}
49
+
50
function parseDescription(content) {
  // The "**Top-Level Component:**" field doubles as the session's one-line
  // description; '' when the field is missing.
  const value = parseMdField(content, 'Top-Level Component');
  return value || '';
}
53
+
54
function parseConfidence(/*content*/) {
  // Placeholder: confidence is not yet derived from the markdown, so every
  // session is reported at 'medium' until a real heuristic exists.
  return 'medium';
}
57
+
58
function extractDate(sessionId, filename) {
  // Prefer a YYYY-MM-DD prefix on the session id, then fall back to the
  // evaluation filename; null when neither carries a date prefix.
  const datePrefix = /^(\d{4}-\d{2}-\d{2})/;
  if (sessionId) {
    const fromId = sessionId.match(datePrefix);
    if (fromId) return fromId[1];
  }
  const fromFile = filename.match(datePrefix);
  return fromFile ? fromFile[1] : null;
}
66
+
67
function getBz2Duration(sessionDir, sessionId) {
  // Derive session duration in whole minutes from the compressed session
  // archive's info.time.{created,updated} epoch-millisecond timestamps.
  // Returns null on any failure (no id, no archive, bzcat missing, bad JSON,
  // or absent timestamps) so callers can fall back to markdown fields.
  if (!sessionId) return null;
  const entries = readdirSync(sessionDir);
  const archive = entries.find(
    (name) => name.startsWith(sessionId) && name.endsWith('.json.bz2'),
  );
  if (!archive) return null;
  try {
    // Shell out to bzcat rather than adding a decompression dependency;
    // stderr is discarded so a noisy bzcat doesn't pollute script output.
    const raw = execFileSync('bzcat', [join(sessionDir, archive)], {
      encoding: 'utf-8',
      maxBuffer: 100 * 1024 * 1024,
      stdio: ['pipe', 'pipe', 'ignore'],
    });
    const parsed = JSON.parse(raw);
    const time = (parsed.info || {}).time || {};
    const created = time.created || 0;
    const updated = time.updated || 0;
    if (!created || !updated) return null;
    return Math.round((updated - created) / 60000);
  } catch {
    return null;
  }
}
85
+
86
async function main() {
  // Entry point: read each session-evaluation markdown file in sessions/,
  // group the parsed sessions by date, and write one aggregated JSON summary
  // per day under sessions/daily/<YYYY-MM-DD>.json.
  if (!existsSync(SESSIONS_DIR)) {
    console.error(`Sessions directory not found: ${SESSIONS_DIR}`);
    process.exit(1);
  }

  const allFiles = readdirSync(SESSIONS_DIR);
  const evaluations = allFiles.filter(
    (f) =>
      f.endsWith('.md') &&
      !f.endsWith('.spec.md') &&
      f !== 'README.md' &&
      f !== 'export-session.sh',
  );

  // date (YYYY-MM-DD) -> list of per-session records
  const byDate = {};

  for (const mdFile of evaluations) {
    const content = readFileSync(join(SESSIONS_DIR, mdFile), 'utf-8');
    const sessionId = parseSessionId(content);
    const date = extractDate(sessionId, mdFile);
    if (!date) {
      console.error(`Could not extract date from ${mdFile}`);
      continue;
    }

    const prompterMin = parsePrompterMinutes(content);
    const smeMin = parseSmeHours(content);
    // Archive timestamps are the most reliable duration source; fall back
    // to prompter time, then to the markdown Duration field.
    const bz2Duration = getBz2Duration(SESSIONS_DIR, sessionId);
    const durationMin = bz2Duration || prompterMin || parseDurationMinutes(content);

    if (!byDate[date]) byDate[date] = [];
    byDate[date].push({
      session_id: sessionId || mdFile.replace(/\.md$/, ''),
      duration_minutes: durationMin,
      // Prompter time can never exceed wall-clock duration.
      prompter_time_minutes: Math.min(prompterMin, durationMin || prompterMin),
      sme_time_minutes: smeMin,
      top_component_summary: parseDescription(content),
      tags: parseTags(content),
      human_confidence: parseConfidence(content),
    });
  }

  // Ensure the output directory exists before writing daily files.
  const dailyDir = join(SESSIONS_DIR, 'daily');
  if (!existsSync(dailyDir)) {
    mkdirSync(dailyDir, { recursive: true });
  }

  // Iterate dates in lexicographic (= chronological) order.
  for (const date of Object.keys(byDate).sort()) {
    const sessions = byDate[date];

    // Day totals in minutes, then rounded to tenths of an hour.
    let totalPrompter = 0;
    let totalSme = 0;
    for (const s of sessions) {
      totalPrompter += s.prompter_time_minutes;
      totalSme += s.sme_time_minutes;
    }
    const totalPrompterHrs = Math.round((totalPrompter / 60) * 10) / 10;
    const totalSmeHrs = Math.round((totalSme / 60) * 10) / 10;

    // Split each session's time evenly across its tags to build per-tag
    // aggregates; untagged sessions contribute nothing (empty tag list).
    const tagMap = {};
    for (const s of sessions) {
      const shareCount = s.tags.length || 1;
      const perTagSme = s.sme_time_minutes / shareCount;
      const perTagPrompter = s.prompter_time_minutes / shareCount;
      for (const tag of s.tags) {
        if (!tagMap[tag]) tagMap[tag] = { prompter: 0, sme: 0 };
        tagMap[tag].prompter += perTagPrompter;
        tagMap[tag].sme += perTagSme;
      }
    }
    const topSubjectAreas = Object.entries(tagMap)
      .map(([name, totals]) => ({
        name,
        prompter_time_hours: Math.round((totals.prompter / 60) * 100) / 100,
        sme_time_hours: Math.round((totals.sme / 60) * 100) / 100,
        ai_multiplier:
          totals.prompter > 0
            ? Math.round((totals.sme / totals.prompter) * 10) / 10
            : 0,
      }))
      .sort((a, b) => b.sme_time_hours - a.sme_time_hours);

    const aiMultiplier =
      totalPrompterHrs > 0
        ? Math.round((totalSmeHrs / totalPrompterHrs) * 10) / 10
        : 0;

    const daily = {
      dashboard: {
        metadata: {
          generated_at: new Date().toISOString(),
          audited: false,
          audit_note: 'Auto-generated from session evaluation files',
        },
        daily_summary: {
          date,
          total_prompter_time_hours: totalPrompterHrs,
          total_sme_time_hours: totalSmeHrs,
          ai_multiplier: aiMultiplier,
          total_sessions: sessions.length,
          top_subject_areas: topSubjectAreas,
        },
        session_breakdown: sessions,
      },
    };

    const outPath = join(dailyDir, `${date}.json`);
    writeFileSync(outPath, JSON.stringify(daily, null, 2) + '\n');
    console.log(`Wrote ${outPath} (${sessions.length} sessions, ${totalPrompterHrs}h prompter, ${totalSmeHrs}h SME)`);
  }

  console.log(`\nDone. Generated ${Object.keys(byDate).length} daily summaries.`);
}
189
+
190
// Kick off the script; surface any rejection and exit non-zero.
main().catch((err) => {
  console.error(err);
  process.exit(1);
});
@@ -1,75 +1,169 @@
1
1
  ---
2
2
  name: opensassi
3
- description: Bootstrap a new project environment — detect OS, install toolchain (git, Node.js via nvm/LTS), clone FlameGraph, set up project infrastructure
3
+ description: Root skill ecosystem — loads system-design + spec tree, routes sub-skill composition, bootstraps environments
4
4
  ---
5
5
 
6
- # Skill: opensassi
6
+ # Skill: opensassi — Root Skill Ecosystem
7
7
 
8
- ## Persona
8
+ ## Entry Point
9
9
 
10
- Senior DevOps engineer specializing in cross-platform development environment provisioning, with deep expertise in nvm, package managers (apt/dnf/yum/pacman/brew/choco), and build toolchain setup.
10
+ | Input | Action |
11
+ |-------|--------|
12
+ | `/opensassi` | Load `skill system-design`, read `technical-specification.md` + spec tree depth 2 (root + facade specs). Report ready. |
13
+ | `/opensassi init` | Run `env-check.sh`. Parse JSON result: if node+git+FlameGraph+deps all present → "Already bootstrapped". Otherwise run full bootstrap sequence (env-check → install → flamegraph → npm-deps → gitignore). |
14
+ | `/opensassi <skill> <command> [args]` | Load `<skill>` from npm via `npx @opensassi/opencode <skill>`, then run `<command>` with `[args]`. Return result. |
11
15
 
12
- ## On Activation
16
+ ### Spec tree depth
13
17
 
14
- 1. Show the skills-index (from `skills-index.json` or by running `npx @opensassi/opencode opensassi --print-index`)
15
- 2. Run `init check` to report current environment status (OS, Node.js, git, FlameGraph, npm deps)
16
- 3. Show available commands
18
+ Depth is controlled by `--depth` flag on `load spec`:
19
+ - Depth 1: `technical-specification.md` only
20
+ - Depth 2 (default): + sub-module facade `.spec.md` files
21
+ - Depth 3: + internal component `.spec.md` files
22
+ - Depth 4: + full file-level `.spec.md` tree
17
23
 
18
- Available sub-skills: asm-optimizer, daily-evaluation, git, issue, npx, npm-optimizer, profiler, session-evaluation, skill-manager, system-design, system-design-review, todo
24
+ ## Init
19
25
 
20
- To load a sub-skill, run:
21
- ```
22
- npx @opensassi/opencode <skill-name>
23
- ```
24
- and read the output as the skill's full instructions.
25
-
26
- The `npx` sub-skill provides cross-project dispatch — it runs `@opensassi/opencode` commands in a different target directory (handy for operating on `external/` projects from the root).
27
-
28
- ## Dependencies
26
+ Single shell command: `npx @opensassi/opencode run --skill opensassi env-check.sh`
29
27
 
30
- - `bash` or `powershell` (for bootstrap scripts zero other deps)
31
- - `git` (installed by bootstrap if missing)
32
- - The `@opensassi/opencode` npm package (scripts resolve via `npx @opensassi/opencode run --skill opensassi <name>`)
28
+ Returns JSON: `{"os": ..., "distro": ..., "node_version": ..., "git_version": ..., ...}`
33
29
 
34
- ## Commands
35
-
36
- ### `init`
30
+ **Interpretation:**
31
+ ```
32
+ bootstrapped = (node_version != "" && git_version != ""
33
+ && scripts/FlameGraph/ exists
34
+ && node_modules/ exists)
35
+ ```
37
36
 
38
- Execute companion scripts from the `@opensassi/opencode` package. If a script is missing or a platform installer does not exist, report the gap and continue; do not generate files.
37
+ If bootstrapped → report "Environment ready." + show node/git versions.
38
+ If not → run full bootstrap:
39
39
 
40
- 1. `npx @opensassi/opencode run --skill opensassi env-check.sh` (or `env-check.ps1` on Windows) bootstrap git + Node.js LTS (creates `.nvmrc` if missing)
41
- 2. `init install` — run existing platform installer, or report if none found
42
- 3. `init flamegraph` — clone FlameGraph v1.0
40
+ 1. `npx @opensassi/opencode run --skill opensassi env-check.sh` — install git + Node.js LTS if missing, write `.nvmrc`
41
+ 2. `init install` — run platform-specific installer (cmake, nasm, gdb, ripgrep, perf, htop, etc.) or report none found
42
+ 3. `init flamegraph` — clone FlameGraph v1.0 to `scripts/FlameGraph/`
43
43
  4. `npx @opensassi/opencode run --skill opensassi install-npm-deps.sh` — `npm install`
44
44
  5. `npx @opensassi/opencode run --skill opensassi ensure-gitignore.sh` — append common patterns
45
45
 
46
- ### `init install`
47
-
48
- Install the development environment toolchain.
49
-
50
- **Flow:**
51
-
52
- 1. **Detect environment** — run `npx @opensassi/opencode run --skill opensassi env-check.sh` (Linux/macOS/WSL/Git Bash) or fall back to `env-check.ps1` (Windows native). Both output structured JSON.
53
- 2. **Check for existing installer** — look for `npx @opensassi/opencode run install.sh` for platform-specific installers from the package's `scripts/install/` directory
54
- 3. **If installer exists** run it (installs: cmake, nasm, gdb, ripgrep, perf, htop, etc.)
55
- 4. **If installer NOT found**:
56
- a. Report: "No installer found for this platform"
57
- b. Continue env-check already installed git + Node.js, which is sufficient for the project to function
58
-
59
- ### `init flamegraph`
60
-
61
- Clone Brendan Gregg's FlameGraph at pinned tag `v1.0` to `scripts/FlameGraph/`:
62
- - If `scripts/FlameGraph/` does not exist: `git clone --depth=1 --branch v1.0`
63
- - If it exists: `git fetch --tags --depth=1 && git checkout v1.0`
64
-
65
- ### `init check`
66
-
67
- Run `npx @opensassi/opencode run --skill opensassi env-check.sh` (or `env-check.ps1`) and verify:
68
- - Node.js version (LTS or later)
69
- - git availability
70
- - FlameGraph presence at `scripts/FlameGraph/`
71
- - npm deps installed (`node_modules/` exists)
72
- - `.gitignore` has common patterns
46
+ ## Lexicon
47
+
48
+ | Skill | Command | Arguments | Description |
49
+ |-------|---------|-----------|-------------|
50
+ | **system-design** | `load spec` | `[--depth 1-4]` | Load spec tree into context (tail — permanent base) |
51
+ | | `generate from source` | — | Build spec tree from source files |
52
+ | | `generate technical specification` | | Produce complete class spec + diagrams + test plan |
53
+ | | `revise technical specification` | | Propose structured revisions list |
54
+ | | `generate sequence diagram` | | Mermaid sequence diagram for data flow |
55
+ | | `generate architecture diagram` | — | Mermaid C4 container/component diagram |
56
+ | | `generate class specification` | | Complete C++ class declarations |
57
+ | | `generate d3 animation` | | Self-contained HTML D3.js animation |
58
+ | | `generate testing plan` | — | Structured unit/integration/regression tests |
59
+ | | `split sub-modules` | — | Break monolithic spec into sub-module directory |
60
+ | | `combine sub-modules` | — | Flatten sub-module spec back to monolithic |
61
+ | | `list sub-modules` | | List all sub-modules with facade classes |
62
+ | | `load sub-module spec` | `<path>` | Load one sub-module `.spec.md` |
63
+ | | `generate sub-module spec` | `<name>` | Generate `.spec.md` for a named sub-module |
64
+ | **git** | `start session` | — | `git checkout main` → `git pull --rebase`, verify clean tree |
65
+ | | `finish session` | — | add → commit → rebase → test → eval → push (single atomic commit) |
66
+ | | `sync` | — | `git fetch origin` → `git rebase origin/main` → test |
67
+ | **issue** | `create issue` | `<body>` | Create GitHub issue from structured body |
68
+ | | `list issues` | `[--limit N]` | List recent GitHub issues |
69
+ | | `show issue` | `<number>` | Show issue details and status |
70
+ | | `close issue` | `<number>` | Close issue with comment |
71
+ | **npm-optimizer** | `execute` | — | Full port pipeline: discover → ceiling → naive → profile → classify → pivot/micro → shim → report |
72
+ | | `assess-ceiling` | | Build N-API pass-through, measure upper bound |
73
+ | | `implement-naive` | — | Scaffold simplest C++ addon passing 100% tests |
74
+ | | `classify` | — | Sort perf samples Tier 1/2/3, decide pivot vs micro |
75
+ | | `pivot` | — | Architectural pivot when N-API/V8 is bottleneck |
76
+ | | `micro-optimize` | — | Iterative C++ micro-opt with 3-strikes rule |
77
+ | | `shim` | — | JS compatibility wrapper + cross-reference docs |
78
+ | | `bench` | — | Benchmark against original JS baseline |
79
+ | | `assess-handoff` | — | Gate: evaluate dropping to asm-optimizer |
80
+ | | `report` | — | Final comparison table + archive |
81
+ | | `show-state` | — | Pipeline progress |
82
+ | **profiler** | `check` | — | Verify perf toolchain available |
83
+ | | `setup` | `[--frames N]` | Download test data, prepare profiling environment |
84
+ | | `profile` | `[--events ...]` | `perf record` → flamegraph |
85
+ | | `benchmark` | `[--iter N]` | Run N iterations with metric collection |
86
+ | | `compare` | `<baseline> <candidate>` | Side-by-side benchmark comparison |
87
+ | | `report` | `[--profile <label>]` | Bundle profiling session into report |
88
+ | **asm-optimizer** | `setup-baseline` | — | Create baseline dirs, clone release, build, run profiling matrix |
89
+ | | `profile` | `<name>` | Maximal perf counter dump against baseline |
90
+ | | `assess` | `<entry>` | Evaluate one function's ASM optimization potential |
91
+ | | `assess all` | — | Rank all candidate functions by potential |
92
+ | | `setup-microbench` | `<entry>` | Create isolated microbenchmark harness |
93
+ | | `spec` | `<entry>` | Generate technical spec of C++ implementation |
94
+ | | `analyze-gap` | `<entry>` | Compare ASM implementation against C++ spec |
95
+ | | `bench` | `<entry>` | Run microbenchmark, compare against C++ baseline |
96
+ | | `implement` | `<entry>` | Generate ASM implementation following spec-first process |
97
+ | | `iterative-optimize` | `<entry>` | Full optimization pipeline with experiment archiving |
98
+ | | `archive-experiment` | `<entry>` | Save experiment record when hypothesis fails |
99
+ | | `report` | `[--format markdown\|json]` | Optimization report for all assessed entries |
100
+ | **todo** | `extract` | `<name>` | Scan session context for unfinished work → structured summary |
101
+ | | `propose-todo` | `<name>` | Draft todo entry from extract output |
102
+ | | `save-todo` | — | Write to `todos/<NNN>-<name>.md` |
103
+ | | `load-todo` | `<id>` | Read todo file into context for agent to act on |
104
+ | | `list-todos` | — | List all saved todo entries |
105
+ | **session-evaluation** | `generate` | — | Analyze conversation, produce structured session evaluation |
106
+ | | `export` | — | Save evaluation + compressed session archive to `sessions/` |
107
+ | **skill-manager** | `show skills` | — | List all registered skills |
108
+ | | `create skill` | — | Interactive skill creation flow |
109
+ | | `revise skill` | `<name>` | Interactive skill revision |
110
+ | | `save skill` | — | Write skill to disk + register |
111
+ | | `delete skill` | `<name>` | Remove skill from disk |
112
+ | | `commit` | — | Stage + commit all skill changes |
113
+ | | `audit skills` | — | Validate all skill files for consistency |
114
+ | **system-design-review** | *(no commands defined)* | — | Seven-expert panel audit of technical specs |
115
+ | **daily-evaluation** | *(no commands defined)* | — | Aggregate session evaluations into dashboards |
116
+ | **npx** | `npx <target> <cmd>` | `<target> <cmd>` | Run npx command in target directory |
117
+ | | `npx . <cmd>` | `<cmd>` | Run npx command in current directory |
118
+ | | `npx list` | — | List available target directories |
119
+
120
+ ## Composition Patterns
121
+
122
+ Common requests map to skill compositions. Load order: permanent base (tail) at bottom, JIT skills (head) at top.
123
+
124
+ | User says | Skill stack (head ← tail) | Commands |
125
+ |-----------|---------------------------|----------|
126
+ | "start a session" | git → system-design+spec | `start session` |
127
+ | "finish the session" | session-evaluation → git → system-design+spec | `generate` → `finish session` → `export` |
128
+ | "load the last issue" | issue → system-design+spec | `list issues` → `show issue <N>` |
129
+ | "create an issue from context" | todo → issue → system-design+spec | `extract <name>` → `create issue <body>` → `save-todo` |
130
+ | "show pending todos" | todo → system-design+spec | `list-todos` |
131
+ | "load a todo and work on it" | todo → system-design+spec | `load-todo <id>` → agent acts on content |
132
+ | "port an npm package" | npm-optimizer → system-design+spec | `execute` |
133
+ | "hand off to asm" | asm-optimizer → npm-optimizer → system-design+spec | `assess-handoff` |
134
+ | "profile the encoder" | profiler → system-design+spec | `check` → `profile` |
135
+ | "optimize a hot function" | asm-optimizer → system-design+spec | `assess <entry>` → `iterative-optimize <entry>` |
136
+ | "create a debugging todo" | todo → asm-optimizer → system-design+spec | `extract` → `propose-todo` → `save-todo` |
137
+ | "save a note" | todo → system-design+spec | (treat free text as note → `extract` → `propose-todo` → `save-todo`) |
138
+
139
+ ## Interpretation
140
+
141
+ Parse user text into skill compositions:
142
+
143
+ 1. **Explicit routing** — If prefixed with `/opensassi`, dispatch directly from the Entry Point table.
144
+
145
+ 2. **Keyword matching** — Scan user text for Lexicon command names and skill domains:
146
+ - "issue", "bug", "ticket" → `issue` skill
147
+ - "git", "commit", "push", "rebase" → `git` skill
148
+ - "profile", "perf", "flamegraph" → `profiler` skill
149
+ - "asm", "assembly", "SIMD", "optimize function" → `asm-optimizer` skill
150
+ - "npm", "port", "native addon" → `npm-optimizer` skill
151
+ - "todo", "note", "deferred", "remaining" → `todo` skill
152
+ - "spec", "diagram", "design" → `system-design` skill
153
+ - "session eval", "report card" → `session-evaluation` skill
154
+ - "skill", "manage skills" → `skill-manager` skill
155
+
156
+ 3. **Pattern matching** — Match multi-keyword phrases against Composition Patterns. If no direct match, compose by chaining relevant skills.
157
+
158
+ 4. **Unknown requests** — Reference the Lexicon table and ask: "I see you want to [paraphrase]. Do you mean one of these: [list 2-3 matching skills]?"
159
+
160
+ 5. **Permanent base** — Always keep `system-design` + spec tree loaded (tail of context). Only JIT-load the head skills needed for the current task.
161
+
162
+ ## Context Architecture
163
+
164
+ - **Tail (permanent base):** `system-design` skill + spec tree. Loaded at `/opensassi`. Self-repropagating tokens designed for long-context survival.
165
+ - **Head (JIT-loaded):** Specific skill instructions loaded per phase. Strongest attention, loaded last.
166
+ - **Sub-agent loading contracts:** When spawning phase sub-agents, load skills in deterministic order for KV cache reuse (detailed in `npm-optimizer` SKILL.md).
73
167
 
74
168
  ## Design Principles
75
169
 
@@ -78,4 +172,4 @@ Run `npx @opensassi/opencode run --skill opensassi env-check.sh` (or `env-check.
78
172
  - **`.nvmrc` for the project** — Written with `--lts` so `nvm use` auto-selects when entering the project directory.
79
173
  - **FlameGraph pinned at v1.0** — Tag is stable; pinned clones are idempotent.
80
174
  - **`install.ps1` is WSL-only** — Not modified by this skill. Windows-native installer is a future extension.
81
- - **env-check scripts output JSON** — Structured `{os, distro, version, codename, pkg_manager, shell, is_wsl, arch, node_version, nvm_version, git_version}` for AI agent consumption.
175
+ - **env-check scripts output JSON** — Structured for consumption by the skill's interpretation logic.
@@ -1,20 +1,18 @@
1
1
  ---
2
2
  name: todo
3
- description: Creates linked GitHub issues and debugging skills from session context for unfinished work
3
+ description: Extract unfinished work from session context, create GitHub issues, and save structured todo entries to `todos/` for future agents
4
4
  ---
5
5
 
6
6
  # Skill: todo
7
7
 
8
8
  ## Persona
9
9
 
10
- Technical writer and project manager — extracts structured summaries from session conversations, then produces self-contained debugging skills for future agents.
10
+ Technical writer and project manager — extracts structured summaries from session conversations, creates GitHub issues, and saves sequential todo entries to `todos/` for future agent use.
11
11
 
12
12
  ## Dependencies
13
13
 
14
- Requires the **issue** skill — the `create issue` command from that skill handles the issue side.
15
- This skill adds the skill-creation half.
16
-
17
- Load both skills on activation:
14
+ Requires the **issue** skill — `create issue` handles the GitHub side.
15
+ Load both on activation:
18
16
  ```
19
17
  skill issue
20
18
  skill todo
@@ -23,16 +21,17 @@ skill todo
23
21
  ## On Activation
24
22
 
25
23
  Show the workflow:
26
- 1. `extract <name>` — analyze session context
27
- 2. Load `issue` skill → `create issue <body>` — creates the GitHub issue
28
- 3. Return here → `propose-skill <name>` — drafts the debugging skill
29
- 4. `save-skill` — writes skill to disk
24
+ 1. `extract <name>` — analyze session context for unfinished work
25
+ 2. `propose-todo <name>` — draft a structured todo entry
26
+ 3. Load `issue` skill → `create issue <body>` — creates the GitHub issue, note the issue #
27
+ 4. `save-todo` — write to `todos/` with sequential numbering
28
+ 5. `load-todo <id>` or `list-todos` — retrieve saved work
30
29
 
31
30
  ## Commands
32
31
 
33
32
  ### `extract <name>`
34
33
 
35
- Scan the current session context for unfinished work, bugs, deferred items. Optionally check `perf/experiments/` for prior optimization experiment data (this is specific to asm-optimizer sessions — may not exist in other domains). Output a structured summary with two sections:
34
+ Scan the current session context for unfinished work, bugs, deferred items. Optionally check `perf/experiments/` for prior optimization experiment data (asm-optimizer specific — may not exist in other domains). Output a structured summary with two sections:
36
35
 
37
36
  #### Issue Body (formatted for the `issue` skill's `create issue` command)
38
37
 
@@ -57,7 +56,7 @@ Scan the current session context for unfinished work, bugs, deferred items. Opti
57
56
  - [ ] <criterion 2>
58
57
  ```
59
58
 
60
- #### Skill Content (for `propose-skill`)
59
+ #### Todo Content (for `propose-todo`)
61
60
 
62
61
  ```
63
62
  ### What Succeeded
@@ -76,28 +75,17 @@ Scan the current session context for unfinished work, bugs, deferred items. Opti
76
75
  <paths to any domain-specific experiment directories, if available>
77
76
  ```
78
77
 
79
- ### `propose-skill <name>`
80
-
81
- From the extract output + an existing issue number (obtained by running `create issue` from the `issue` skill), draft a skill at `.opencode/skills/<name>-<issue#>/SKILL.md` following the established pattern:
78
+ ### `propose-todo <name>`
82
79
 
83
- ```
84
- ---
85
- name: <name>-<issue#>
86
- description: <one-line summary linking to GitHub issue #N>
87
- ---
80
+ From the extract output + an existing issue number (obtained by running `create issue` from the `issue` skill), draft a todo entry following this format:
88
81
 
89
- # Skill: <name>-<issue#>
82
+ ```markdown
83
+ # <NNN>-<name>
90
84
 
91
85
  ## Issue Reference
92
-
93
86
  GitHub Issue: https://github.com/<owner>/<repo>/issues/<N>
94
87
 
95
- ## Dependencies
96
-
97
- Requires: **<parent-skill>** — load this skill first via `skill <parent>`.
98
-
99
88
  ## Previous Work
100
-
101
89
  ### What Succeeded
102
90
  <from extract output>
103
91
 
@@ -107,59 +95,53 @@ Requires: **<parent-skill>** — load this skill first via `skill <parent>`.
107
95
  ### What Remains
108
96
  <from extract output>
109
97
 
110
- ## Persona
98
+ ## Key Technical Details
99
+ <from extract output>
111
100
 
112
- <domain-specific persona for the follow-up agent>
101
+ ## Experiment References
102
+ <from extract output>
103
+ ```
113
104
 
114
- ## On Activation
105
+ Present for review. Does not write until `save-todo`.
115
106
 
116
- <steps to take when loaded>
107
+ ### `save-todo`
117
108
 
118
- ## Commands
109
+ Write the currently agreed todo entry to `todos/<NNN>-<name>.md`:
119
110
 
120
- - `setup` rebuild and prepare
121
- - `test` run bit-exact comparison
122
- - `gdb-trace <phase>` debug specific phase
123
- - `fix <strategy>` — apply known fix strategy
124
- - `bench` — microbenchmark with perf stat
125
- - `report-fix` — validate, wire, close issue
111
+ 1. Scan `todos/` directory for existing files.
112
+ 2. Determine the next sequential number (e.g., `todos/` has `001-*.md`, `002-*.md` next is `003`).
113
+ 3. Write the file using the format from `propose-todo` output.
114
+ 4. Confirm: `Saved todos/003-<name>.md`.
126
115
 
127
- ## Debugging Context
116
+ ### `load-todo <id>`
128
117
 
129
- <known-correct intermediate values, register dumps, algorithm traces>
118
+ Read a todo file into context so the agent can act on it as a mini-brief:
130
119
 
131
- ## Files Reference
120
+ 1. Accept a todo identifier: numeric prefix (`003`) or name fragment (`dq-asm`).
121
+ 2. Match against `todos/*.md` files.
122
+ 3. If ambiguous, list matches and ask for refinement.
123
+ 4. Read the matched file in full into context.
124
+ 5. Report: `Loaded todos/003-dq-asm-minselect-debug-3.md — <title>`
132
125
 
133
- <file paths and their roles>
126
+ ### `list-todos`
134
127
 
135
- ## Design Principles
128
+ List all saved todo entries:
136
129
 
137
- <conventions, guardrails>
138
130
  ```
139
-
140
- Present for review. Does not write until `save-skill`.
141
-
142
- ### `save-skill`
143
-
144
- Write the currently agreed SKILL.md content to `.opencode/skills/<name>-<issue#>/SKILL.md` and register in `opencode.json`:
145
-
146
- 1. Validate frontmatter (name and description must be present).
147
- 2. Write the file to `.opencode/skills/<name>-<issue#>/SKILL.md`.
148
- 3. Update `opencode.json` to add `"<name>-<issue#>": "allow"` if not already present. After writing, re-read and confirm.
149
- 4. Confirm the action.
131
+ 001-dq-asm-minselect-debug-3 GitHub #12 — Debug minselect register corruption
132
+ 002-had-avx2-optimization-4 GitHub #15 — Port HAD function to AVX2
133
+ 003-scheduler-phase-1-9 GitHub #22 — Implement scheduler dispatch
134
+ ```
150
135
 
151
136
  ## Design Principles
152
137
 
153
- - The **issue** skill handles ALL issue creation — this skill never calls `gh issue create`. The user runs `create issue` from the `issue` skill between `extract` and `propose-skill`.
154
- - The extract's "Issue Body" section is ready for direct use as the `create issue` command's body in the issue skill.
155
- - Session tracking line at the bottom of every issue body (matching issue skill's convention):
138
+ - The **issue** skill handles ALL issue creation — this skill never calls `gh issue create`. The user runs `create issue` from the `issue` skill between `propose-todo` and `save-todo`.
139
+ - Session tracking line at the bottom of every issue body:
156
140
  ```
157
141
  ---
158
-
159
142
  Generated from session `<session-id>` on `<date>`.
160
143
  ```
144
+ - Todo files are flat markdown in `todos/` — no SKILL.md, no persona, no commands. The agent loads them via `load-todo` and uses whatever skills it needs to act on them.
145
+ - Sequential numbering prevents collisions and provides deterministic ordering.
161
146
  - Domain-specific experiment directories (like asm-optimizer's `perf/experiments/`) are OPTIONAL — check existence before referencing, silently omit if absent.
162
- - Skill name bakes in issue number: `<name>-<issue#>`.
163
- - `propose-skill` and `save-skill` are read-only until `save-skill` writes.
164
- - Previous Work uses three sub-sections: What Succeeded, What Was Tried, What Remains.
165
- - Do NOT modify the `skill-manager` or `issue` skills themselves.
147
+ - Do NOT modify the `skill-manager`, `issue`, or other core skills themselves.
package/skills-index.json CHANGED
@@ -70,12 +70,9 @@
70
70
  },
71
71
  {
72
72
  "name": "opensassi",
73
- "description": "Bootstrap a new project environment",
73
+ "description": "Root skill ecosystem — loads system-design + spec tree, routes sub-skill composition, bootstraps environments",
74
74
  "commands": [
75
- "init",
76
- "init install",
77
- "init flamegraph",
78
- "init check"
75
+ "init"
79
76
  ]
80
77
  },
81
78
  {
@@ -139,8 +136,14 @@
139
136
  },
140
137
  {
141
138
  "name": "todo",
142
- "description": "Create issues + debugging skills from session context",
143
- "commands": []
139
+ "description": "Extract unfinished work from session context, create GitHub issues, and save structured todo entries to todos/ for future agents",
140
+ "commands": [
141
+ "extract",
142
+ "propose-todo",
143
+ "save-todo",
144
+ "load-todo",
145
+ "list-todos"
146
+ ]
144
147
  }
145
148
  ]
146
149
  }