@mcptoolshop/ai-loadout 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +15 -0
- package/LICENSE +21 -0
- package/README.md +181 -0
- package/dist/frontmatter.d.ts +13 -0
- package/dist/frontmatter.js +150 -0
- package/dist/index.d.ts +6 -0
- package/dist/index.js +9 -0
- package/dist/match.d.ts +16 -0
- package/dist/match.js +84 -0
- package/dist/tests/frontmatter.test.d.ts +1 -0
- package/dist/tests/frontmatter.test.js +87 -0
- package/dist/tests/match.test.d.ts +1 -0
- package/dist/tests/match.test.js +88 -0
- package/dist/tests/tokens.test.d.ts +1 -0
- package/dist/tests/tokens.test.js +14 -0
- package/dist/tests/validate.test.d.ts +1 -0
- package/dist/tests/validate.test.js +92 -0
- package/dist/tokens.d.ts +7 -0
- package/dist/tokens.js +9 -0
- package/dist/types.d.ts +50 -0
- package/dist/types.js +6 -0
- package/dist/validate.d.ts +17 -0
- package/dist/validate.js +148 -0
- package/package.json +46 -0
package/CHANGELOG.md
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
# Changelog
|
|
2
|
+
|
|
3
|
+
## 1.0.0 — 2026-03-06
|
|
4
|
+
|
|
5
|
+
Initial release.
|
|
6
|
+
|
|
7
|
+
- `LoadoutIndex` schema — dispatch table for agent knowledge
|
|
8
|
+
- `parseFrontmatter()` / `serializeFrontmatter()` — payload file metadata
|
|
9
|
+
- `matchLoadout()` — deterministic keyword + pattern matcher
|
|
10
|
+
- `lookupEntry()` — explicit entry lookup by ID
|
|
11
|
+
- `validateIndex()` — structural linter for index integrity
|
|
12
|
+
- `estimateTokens()` — chars/4 heuristic for budget dashboards
|
|
13
|
+
- Three priority tiers: core / domain / manual
|
|
14
|
+
- Trigger phases: task / plan / edit
|
|
15
|
+
- Zero production dependencies
|
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 mcp-tool-shop
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
1
|
+
<p align="center">
|
|
2
|
+
<img src="https://raw.githubusercontent.com/mcp-tool-shop-org/brand/main/logos/ai-loadout/readme.png" width="400" alt="ai-loadout">
|
|
3
|
+
</p>
|
|
4
|
+
|
|
5
|
+
<p align="center">
|
|
6
|
+
<a href="https://github.com/mcp-tool-shop-org/ai-loadout/actions/workflows/ci.yml"><img src="https://github.com/mcp-tool-shop-org/ai-loadout/actions/workflows/ci.yml/badge.svg" alt="CI"></a>
|
|
7
|
+
<a href="https://www.npmjs.com/package/@mcptoolshop/ai-loadout"><img src="https://img.shields.io/npm/v/@mcptoolshop/ai-loadout" alt="npm"></a>
|
|
8
|
+
<a href="LICENSE"><img src="https://img.shields.io/badge/license-MIT-blue" alt="MIT License"></a>
|
|
9
|
+
</p>
|
|
10
|
+
|
|
11
|
+
Context-aware knowledge router for AI agents.
|
|
12
|
+
|
|
13
|
+
`ai-loadout` is the dispatch table format and matching engine that lets AI agents load the right knowledge for the task at hand. Instead of dumping everything into context, you keep a tiny index and load payloads on demand.
|
|
14
|
+
|
|
15
|
+
Think of it like a game loadout — you equip the agent with exactly the knowledge it needs before each mission.
|
|
16
|
+
|
|
17
|
+
## Install
|
|
18
|
+
|
|
19
|
+
```bash
|
|
20
|
+
npm install @mcptoolshop/ai-loadout
|
|
21
|
+
```
|
|
22
|
+
|
|
23
|
+
## Core Concepts
|
|
24
|
+
|
|
25
|
+
### The Dispatch Table
|
|
26
|
+
|
|
27
|
+
A `LoadoutIndex` is a structured index of knowledge payloads:
|
|
28
|
+
|
|
29
|
+
```json
|
|
30
|
+
{
|
|
31
|
+
"version": "1.0.0",
|
|
32
|
+
"generated": "2026-03-06T12:00:00Z",
|
|
33
|
+
"entries": [
|
|
34
|
+
{
|
|
35
|
+
"id": "github-actions",
|
|
36
|
+
"path": ".rules/github-actions.md",
|
|
37
|
+
"keywords": ["ci", "workflow", "runner"],
|
|
38
|
+
"patterns": ["ci_pipeline"],
|
|
39
|
+
"priority": "domain",
|
|
40
|
+
"summary": "CI triggers, path gating, runner cost control",
|
|
41
|
+
"triggers": { "task": true, "plan": true, "edit": false },
|
|
42
|
+
"tokens_est": 680,
|
|
43
|
+
"lines": 56
|
|
44
|
+
}
|
|
45
|
+
],
|
|
46
|
+
"budget": {
|
|
47
|
+
"always_loaded_est": 320,
|
|
48
|
+
"on_demand_total_est": 8100,
|
|
49
|
+
"avg_task_load_est": 520,
|
|
50
|
+
"avg_task_load_observed": null
|
|
51
|
+
}
|
|
52
|
+
}
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
### Priority Tiers
|
|
56
|
+
|
|
57
|
+
| Tier | Behavior | Example |
|
|
58
|
+
|------|----------|---------|
|
|
59
|
+
| `core` | Always loaded | "never skip tests to make CI green" |
|
|
60
|
+
| `domain` | Loaded when task keywords match | CI rules when editing workflows |
|
|
61
|
+
| `manual` | Never auto-loaded, explicit lookup only | Obscure platform gotchas |
|
|
62
|
+
|
|
63
|
+
### Payload Frontmatter
|
|
64
|
+
|
|
65
|
+
Each payload file carries its own routing metadata:
|
|
66
|
+
|
|
67
|
+
```markdown
|
|
68
|
+
---
|
|
69
|
+
id: github-actions
|
|
70
|
+
keywords: [ci, workflow, runner, dependabot]
|
|
71
|
+
patterns: [ci_pipeline]
|
|
72
|
+
priority: domain
|
|
73
|
+
triggers:
|
|
74
|
+
task: true
|
|
75
|
+
plan: true
|
|
76
|
+
edit: false
|
|
77
|
+
---
|
|
78
|
+
|
|
79
|
+
# GitHub Actions Rules
|
|
80
|
+
CI minutes are finite...
|
|
81
|
+
```
|
|
82
|
+
|
|
83
|
+
Frontmatter is the source of truth. The index is derived from it.
|
|
84
|
+
|
|
85
|
+
## API
|
|
86
|
+
|
|
87
|
+
### `matchLoadout(task, index)`
|
|
88
|
+
|
|
89
|
+
Match a task description against a loadout index. Returns entries that should be loaded, ranked by match strength.
|
|
90
|
+
|
|
91
|
+
```typescript
|
|
92
|
+
import { matchLoadout } from "@mcptoolshop/ai-loadout";
|
|
93
|
+
|
|
94
|
+
const results = matchLoadout("fix the CI workflow", index);
|
|
95
|
+
// [{ entry: { id: "github-actions", ... }, score: 0.67, matchedKeywords: ["ci", "workflow"] }]
|
|
96
|
+
```
|
|
97
|
+
|
|
98
|
+
- Core entries always included (score 1.0)
|
|
99
|
+
- Manual entries never auto-included
|
|
100
|
+
- Domain entries scored by keyword overlap + pattern bonus
|
|
101
|
+
- Results sorted by score descending
|
|
102
|
+
|
|
103
|
+
### `lookupEntry(id, index)`
|
|
104
|
+
|
|
105
|
+
Look up a specific entry by ID. For manual entries or explicit access.
|
|
106
|
+
|
|
107
|
+
```typescript
|
|
108
|
+
import { lookupEntry } from "@mcptoolshop/ai-loadout";
|
|
109
|
+
|
|
110
|
+
const entry = lookupEntry("github-actions", index);
|
|
111
|
+
```
|
|
112
|
+
|
|
113
|
+
### `parseFrontmatter(content)`
|
|
114
|
+
|
|
115
|
+
Parse YAML-like frontmatter from a payload file.
|
|
116
|
+
|
|
117
|
+
```typescript
|
|
118
|
+
import { parseFrontmatter } from "@mcptoolshop/ai-loadout";
|
|
119
|
+
|
|
120
|
+
const { frontmatter, body } = parseFrontmatter(fileContent);
|
|
121
|
+
if (frontmatter) {
|
|
122
|
+
console.log(frontmatter.id, frontmatter.keywords);
|
|
123
|
+
}
|
|
124
|
+
```
|
|
125
|
+
|
|
126
|
+
### `serializeFrontmatter(fm)`
|
|
127
|
+
|
|
128
|
+
Serialize a `Frontmatter` object back to a string.
|
|
129
|
+
|
|
130
|
+
### `validateIndex(index)`
|
|
131
|
+
|
|
132
|
+
Validate structural integrity of a `LoadoutIndex`. Returns an array of issues.
|
|
133
|
+
|
|
134
|
+
```typescript
|
|
135
|
+
import { validateIndex } from "@mcptoolshop/ai-loadout";
|
|
136
|
+
|
|
137
|
+
const issues = validateIndex(index);
|
|
138
|
+
const errors = issues.filter(i => i.severity === "error");
|
|
139
|
+
if (errors.length > 0) {
|
|
140
|
+
console.error("Index has errors:", errors);
|
|
141
|
+
}
|
|
142
|
+
```
|
|
143
|
+
|
|
144
|
+
Checks: required fields, unique IDs, kebab-case format, summary bounds, keyword presence for domain entries, valid priorities, non-negative budgets.
|
|
145
|
+
|
|
146
|
+
### `estimateTokens(text)`
|
|
147
|
+
|
|
148
|
+
Estimate token count from text. Uses chars/4 heuristic.
|
|
149
|
+
|
|
150
|
+
```typescript
|
|
151
|
+
import { estimateTokens } from "@mcptoolshop/ai-loadout";
|
|
152
|
+
|
|
153
|
+
const tokens = estimateTokens(fileContent); // ~250
|
|
154
|
+
```
|
|
155
|
+
|
|
156
|
+
## Types
|
|
157
|
+
|
|
158
|
+
```typescript
|
|
159
|
+
import type {
|
|
160
|
+
LoadoutEntry,
|
|
161
|
+
LoadoutIndex,
|
|
162
|
+
Frontmatter,
|
|
163
|
+
MatchResult,
|
|
164
|
+
ValidationIssue,
|
|
165
|
+
Priority, // "core" | "domain" | "manual"
|
|
166
|
+
Triggers, // { task, plan, edit }
|
|
167
|
+
Budget,
|
|
168
|
+
} from "@mcptoolshop/ai-loadout";
|
|
169
|
+
```
|
|
170
|
+
|
|
171
|
+
## Consumers
|
|
172
|
+
|
|
173
|
+
- **[@mcptoolshop/claude-rules](https://github.com/mcp-tool-shop-org/claude-rules)** — CLAUDE.md optimizer for Claude Code. Uses ai-loadout for the dispatch table and matching.
|
|
174
|
+
|
|
175
|
+
## Security
|
|
176
|
+
|
|
177
|
+
This package is a pure data library. It does not access the filesystem, make network requests, or collect telemetry. All I/O is the consumer's responsibility.
|
|
178
|
+
|
|
179
|
+
---
|
|
180
|
+
|
|
181
|
+
Built by [MCP Tool Shop](https://mcp-tool-shop.github.io/)
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
/**
 * Frontmatter parser and serializer.
 *
 * Parses YAML-like --- delimited blocks from payload files.
 * Hand-rolled: supports strings, inline arrays, booleans, and
 * one level of nested objects (triggers). No deps, deterministic.
 */
import type { Frontmatter } from "./types.js";
/**
 * Parse the leading `---` frontmatter block from a payload file.
 * Returns the parsed metadata plus the remaining body text;
 * `frontmatter` is null when the delimiters are absent/unclosed
 * or the required `id` field is missing.
 */
export declare function parseFrontmatter(content: string): {
    frontmatter: Frontmatter | null;
    body: string;
};
/** Serialize a Frontmatter object back to a `---` delimited string. */
export declare function serializeFrontmatter(fm: Frontmatter): string;
|
|
@@ -0,0 +1,150 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Frontmatter parser and serializer.
|
|
3
|
+
*
|
|
4
|
+
* Parses YAML-like --- delimited blocks from payload files.
|
|
5
|
+
* Hand-rolled: supports strings, inline arrays, booleans, and
|
|
6
|
+
* one level of nested objects (triggers). No deps, deterministic.
|
|
7
|
+
*/
|
|
8
|
+
import { DEFAULT_TRIGGERS } from "./types.js";
|
|
9
|
+
const VALID_PRIORITIES = new Set(["core", "domain", "manual"]);
// ── Parse frontmatter from file content ────────────────────────
/**
 * Parse a YAML-like `---` delimited frontmatter block from `content`.
 *
 * Supported value forms: bare/quoted strings, inline arrays (`[a, b]`),
 * block arrays (`- item` lines), booleans, and one level of nested
 * objects (used for `triggers`).
 *
 * @param {string} content - Full payload file text.
 * @returns {{ frontmatter: Frontmatter | null, body: string }} Parsed
 *   metadata plus the remaining body. `frontmatter` is null when the
 *   delimiters are absent/unclosed or the required `id` is missing;
 *   in that case `body` is the untouched input.
 */
export function parseFrontmatter(content) {
    const lines = content.split("\n");
    if (lines[0]?.trim() !== "---") {
        return { frontmatter: null, body: content };
    }
    let closeIndex = -1;
    for (let i = 1; i < lines.length; i++) {
        if (lines[i].trim() === "---") {
            closeIndex = i;
            break;
        }
    }
    if (closeIndex === -1) {
        return { frontmatter: null, body: content };
    }
    const fmLines = lines.slice(1, closeIndex);
    const body = lines.slice(closeIndex + 1).join("\n");
    const data = {};
    let currentKey = "";
    let currentArray = null;
    let currentObject = null;
    // Iterate by index so the empty-value peek below can look at the
    // NEXT line positionally. (The previous implementation peeked via
    // fmLines.indexOf(line), which resolves a duplicated line to its
    // FIRST occurrence and mis-detects block arrays/objects that
    // follow a repeated key line.)
    for (let i = 0; i < fmLines.length; i++) {
        const line = fmLines[i];
        const trimmed = line.trim();
        if (trimmed === "")
            continue;
        // Block array item
        if (currentArray !== null && trimmed.startsWith("- ")) {
            currentArray.push(trimmed.slice(2).trim());
            continue;
        }
        // Nested object value (indented); values are coerced to boolean
        if (currentObject !== null && line.startsWith(" ")) {
            const colonIdx = trimmed.indexOf(":");
            if (colonIdx !== -1) {
                const key = trimmed.slice(0, colonIdx).trim();
                const val = trimmed.slice(colonIdx + 1).trim();
                currentObject[key] = val === "true";
                continue;
            }
        }
        // Flush any pending collection before starting a new key
        if (currentArray !== null) {
            data[currentKey] = currentArray;
            currentArray = null;
        }
        if (currentObject !== null) {
            data[currentKey] = currentObject;
            currentObject = null;
        }
        // Key: value pair
        const colonIdx = trimmed.indexOf(":");
        if (colonIdx === -1)
            continue;
        const key = trimmed.slice(0, colonIdx).trim();
        const rawVal = trimmed.slice(colonIdx + 1).trim();
        currentKey = key;
        if (rawVal === "") {
            // Empty value: peek at the next line to decide between a
            // block array ("- item") and a nested object (indented).
            if (i + 1 < fmLines.length) {
                const nextLine = fmLines[i + 1];
                if (nextLine.trim().startsWith("- ")) {
                    currentArray = [];
                }
                else if (nextLine.startsWith(" ")) {
                    currentObject = {};
                }
            }
            continue;
        }
        // Inline array: [a, b, c]
        if (rawVal.startsWith("[") && rawVal.endsWith("]")) {
            const inner = rawVal.slice(1, -1);
            data[key] = inner
                .split(",")
                .map((s) => s.trim())
                .filter((s) => s.length > 0);
            continue;
        }
        // Boolean
        if (rawVal === "true") {
            data[key] = true;
            continue;
        }
        if (rawVal === "false") {
            data[key] = false;
            continue;
        }
        // String (strip surrounding double quotes if present)
        data[key] =
            rawVal.startsWith('"') && rawVal.endsWith('"')
                ? rawVal.slice(1, -1)
                : rawVal;
    }
    // Flush trailing collection
    if (currentArray !== null)
        data[currentKey] = currentArray;
    if (currentObject !== null)
        data[currentKey] = currentObject;
    // Validate required fields
    if (typeof data.id !== "string" || !data.id) {
        return { frontmatter: null, body: content };
    }
    const fm = {
        id: data.id,
        keywords: Array.isArray(data.keywords) ? data.keywords : [],
        patterns: Array.isArray(data.patterns) ? data.patterns : [],
        priority: VALID_PRIORITIES.has(data.priority)
            ? data.priority
            : "domain",
        triggers: parseTriggers(data.triggers),
    };
    return { frontmatter: fm, body };
}
/**
 * Coerce a raw parsed `triggers` value into a complete Triggers object,
 * falling back to DEFAULT_TRIGGERS for missing or non-boolean fields.
 */
function parseTriggers(raw) {
    if (!raw || typeof raw !== "object")
        return { ...DEFAULT_TRIGGERS };
    const obj = raw;
    return {
        task: typeof obj.task === "boolean" ? obj.task : DEFAULT_TRIGGERS.task,
        plan: typeof obj.plan === "boolean" ? obj.plan : DEFAULT_TRIGGERS.plan,
        edit: typeof obj.edit === "boolean" ? obj.edit : DEFAULT_TRIGGERS.edit,
    };
}
|
|
135
|
+
// ── Serialize frontmatter to string ────────────────────────────
/**
 * Render a Frontmatter object as a `---` delimited YAML-like block.
 *
 * The `patterns:` line is omitted when the list is empty; `triggers`
 * is always written in full as a nested block.
 *
 * @param {Frontmatter} fm - Metadata to serialize.
 * @returns {string} Frontmatter text (no trailing newline).
 */
export function serializeFrontmatter(fm) {
    const patternLines =
        fm.patterns.length > 0 ? [`patterns: [${fm.patterns.join(", ")}]`] : [];
    return [
        "---",
        `id: ${fm.id}`,
        `keywords: [${fm.keywords.join(", ")}]`,
        ...patternLines,
        `priority: ${fm.priority}`,
        "triggers:",
        ` task: ${fm.triggers.task}`,
        ` plan: ${fm.triggers.plan}`,
        ` edit: ${fm.triggers.edit}`,
        "---",
    ].join("\n");
}
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
1
|
+
/** Public type surface of @mcptoolshop/ai-loadout (pure data library). */
export type { Priority, Triggers, LoadoutEntry, Budget, LoadoutIndex, Frontmatter, MatchResult, IssueSeverity, ValidationIssue, } from "./types.js";
/** Default trigger flags applied when frontmatter omits `triggers`. */
export { DEFAULT_TRIGGERS } from "./types.js";
/** Payload-file metadata parsing/serialization. */
export { parseFrontmatter, serializeFrontmatter } from "./frontmatter.js";
/** chars/4 token-estimation heuristic. */
export { estimateTokens } from "./tokens.js";
/** Deterministic keyword/pattern matching and explicit ID lookup. */
export { matchLoadout, lookupEntry } from "./match.js";
/** Structural linting of a LoadoutIndex. */
export { validateIndex } from "./validate.js";
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
// Package entry point: re-exports the public API of @mcptoolshop/ai-loadout.
export { DEFAULT_TRIGGERS } from "./types.js";
// ── Frontmatter ────────────────────────────────────────────────
export { parseFrontmatter, serializeFrontmatter } from "./frontmatter.js";
// ── Tokens ─────────────────────────────────────────────────────
export { estimateTokens } from "./tokens.js";
// ── Matcher ────────────────────────────────────────────────────
export { matchLoadout, lookupEntry } from "./match.js";
// ── Validator ──────────────────────────────────────────────────
export { validateIndex } from "./validate.js";
|
package/dist/match.d.ts
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
/**
 * Loadout matcher.
 *
 * Given a task description and a loadout index, returns which payload
 * entries should be loaded, ranked by match strength.
 *
 * Matching is deterministic:
 * - Tokenizes the task into lowercase words
 * - Scores each entry by keyword and pattern overlap
 * - Returns entries above a minimum score threshold
 * - Core entries are always included (score 1.0)
 * - Manual entries are never auto-included (require explicit lookup)
 */
import type { LoadoutIndex, LoadoutEntry, MatchResult } from "./types.js";
/**
 * Match a free-text task against the index; results are sorted by
 * score descending (ties broken by estimated token cost ascending).
 */
export declare function matchLoadout(task: string, index: LoadoutIndex): MatchResult[];
/** Find an entry by exact ID, or undefined when absent. */
export declare function lookupEntry(id: string, index: LoadoutIndex): LoadoutEntry | undefined;
|
package/dist/match.js
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Loadout matcher.
|
|
3
|
+
*
|
|
4
|
+
* Given a task description and a loadout index, returns which payload
|
|
5
|
+
* entries should be loaded, ranked by match strength.
|
|
6
|
+
*
|
|
7
|
+
* Matching is deterministic:
|
|
8
|
+
* - Tokenizes the task into lowercase words
|
|
9
|
+
* - Scores each entry by keyword and pattern overlap
|
|
10
|
+
* - Returns entries above a minimum score threshold
|
|
11
|
+
* - Core entries are always included (score 1.0)
|
|
12
|
+
* - Manual entries are never auto-included (require explicit lookup)
|
|
13
|
+
*/
|
|
14
|
+
const MIN_SCORE = 0.1; // minimum score to include a domain entry
// ── Tokenize a task description into matchable words ───────────
/**
 * Lowercase `text`, strip punctuation, and split it into a Set of
 * words longer than one character.
 *
 * Splits on hyphens as well as whitespace so that hyphenated task
 * text ("ci-pipeline") yields the same word tokens that scoreEntry
 * derives from keywords via kw.split(/[\s-]+/). Previously hyphenated
 * runs were kept as single tokens and could never match a hyphenated
 * keyword or a pattern word.
 */
function tokenize(text) {
    return new Set(text
        .toLowerCase()
        .replace(/[^a-z0-9\s-]/g, " ")
        .split(/[\s-]+/)
        .filter((w) => w.length > 1));
}
|
|
23
|
+
// ── Score a single entry against task tokens ───────────────────
/**
 * Score one loadout entry against the tokenized task.
 *
 * - "core" entries always score 1.0 (always loaded).
 * - "manual" entries always score 0 (explicit lookup only).
 * - "domain" entries score by the fraction of their keywords whose
 *   words all appear in the task, plus a flat 0.2 bonus when any
 *   pattern-name word appears; capped at 1.0.
 *
 * @returns {{ score: number, matchedKeywords: string[], matchedPatterns: string[] }}
 */
function scoreEntry(entry, taskTokens) {
    // Core entries always match
    if (entry.priority === "core") {
        return { score: 1.0, matchedKeywords: [], matchedPatterns: [] };
    }
    // Manual entries never auto-match
    if (entry.priority === "manual") {
        return { score: 0, matchedKeywords: [], matchedPatterns: [] };
    }
    // A multi-word keyword counts only when every one of its words is present.
    const matchedKeywords = entry.keywords.filter((kw) =>
        kw.split(/[\s-]+/).every((word) => taskTokens.has(word)));
    // A pattern counts when any of its underscore-separated words appears.
    const matchedPatterns = entry.patterns.filter((pattern) =>
        pattern.split("_").some((word) => taskTokens.has(word)));
    // Proportion of keywords matched, plus a flat bonus for any pattern hit.
    const keywordScore = entry.keywords.length === 0
        ? 0
        : matchedKeywords.length / entry.keywords.length;
    const patternBonus = matchedPatterns.length > 0 ? 0.2 : 0;
    return {
        score: Math.min(1.0, keywordScore + patternBonus),
        matchedKeywords,
        matchedPatterns,
    };
}
|
|
61
|
+
// ── Match a task against a loadout index ────────────────────────
// Returns entries that should be loaded, sorted by score (highest first).
/**
 * Rank loadout entries for a free-text task description.
 *
 * Every entry is scored via scoreEntry(); entries at or above
 * MIN_SCORE are kept and sorted by score descending, breaking ties
 * by estimated token cost ascending (cheaper payloads first).
 *
 * @param {string} task - Free-text task description.
 * @param {LoadoutIndex} index - Index to match against.
 * @returns {MatchResult[]} Ranked match results.
 */
export function matchLoadout(task, index) {
    const taskTokens = tokenize(task);
    const results = index.entries
        .map((entry) => ({ entry, ...scoreEntry(entry, taskTokens) }))
        .filter((result) => result.score >= MIN_SCORE);
    // Highest score first; among equal scores, prefer the cheaper payload.
    results.sort((a, b) => b.score - a.score || a.entry.tokens_est - b.entry.tokens_est);
    return results;
}
|
|
80
|
+
// ── Look up a specific entry by ID ─────────────────────────────
// For manual entries or explicit lookup.
/**
 * Find the entry with the given `id`.
 *
 * @param {string} id - Entry identifier.
 * @param {LoadoutIndex} index - Index to search.
 * @returns {LoadoutEntry | undefined} The matching entry, or undefined.
 */
export function lookupEntry(id, index) {
    for (const entry of index.entries) {
        if (entry.id === id) {
            return entry;
        }
    }
    return undefined;
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export {};
|
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
// Unit tests (node:test runner) for parseFrontmatter/serializeFrontmatter.
import { describe, it } from "node:test";
import assert from "node:assert/strict";
import { parseFrontmatter, serializeFrontmatter } from "../frontmatter.js";
import { DEFAULT_TRIGGERS } from "../types.js";
describe("parseFrontmatter", () => {
    it("returns null when no delimiters", () => {
        const { frontmatter, body } = parseFrontmatter("Just content");
        assert.equal(frontmatter, null);
        assert.equal(body, "Just content");
    });
    it("parses basic frontmatter", () => {
        const content = "---\nid: test\nkeywords: [ci, deploy]\npriority: domain\n---\n\nBody";
        const { frontmatter, body } = parseFrontmatter(content);
        assert.ok(frontmatter);
        assert.equal(frontmatter.id, "test");
        assert.deepEqual(frontmatter.keywords, ["ci", "deploy"]);
        assert.equal(frontmatter.priority, "domain");
        assert.ok(body.includes("Body"));
    });
    it("parses triggers block", () => {
        const content = "---\nid: t\nkeywords: [x]\npriority: core\ntriggers:\n task: false\n plan: true\n edit: true\n---\nBody";
        const { frontmatter } = parseFrontmatter(content);
        assert.ok(frontmatter);
        assert.equal(frontmatter.triggers.task, false);
        assert.equal(frontmatter.triggers.plan, true);
        assert.equal(frontmatter.triggers.edit, true);
    });
    it("defaults triggers when missing", () => {
        const content = "---\nid: t\nkeywords: [x]\npriority: domain\n---\nBody";
        const { frontmatter } = parseFrontmatter(content);
        assert.ok(frontmatter);
        assert.deepEqual(frontmatter.triggers, DEFAULT_TRIGGERS);
    });
    it("returns null when id missing", () => {
        const content = "---\nkeywords: [x]\npriority: domain\n---\nBody";
        const { frontmatter } = parseFrontmatter(content);
        assert.equal(frontmatter, null);
    });
    it("defaults invalid priority to domain", () => {
        const content = "---\nid: t\nkeywords: [x]\npriority: bogus\n---\nBody";
        const { frontmatter } = parseFrontmatter(content);
        assert.ok(frontmatter);
        assert.equal(frontmatter.priority, "domain");
    });
    it("handles patterns field", () => {
        const content = "---\nid: t\nkeywords: [x]\npatterns: [ci_pipeline, deploy_flow]\npriority: domain\n---\nBody";
        const { frontmatter } = parseFrontmatter(content);
        assert.ok(frontmatter);
        assert.deepEqual(frontmatter.patterns, ["ci_pipeline", "deploy_flow"]);
    });
    it("handles empty keywords gracefully", () => {
        const content = "---\nid: t\nkeywords: []\npriority: manual\n---\nBody";
        const { frontmatter } = parseFrontmatter(content);
        assert.ok(frontmatter);
        assert.deepEqual(frontmatter.keywords, []);
    });
});
describe("serializeFrontmatter", () => {
    // Round-trip: serialize then re-parse must preserve every field.
    it("round-trips correctly", () => {
        const original = {
            id: "github-actions",
            keywords: ["ci", "workflow"],
            patterns: ["ci_pipeline"],
            priority: "domain",
            triggers: { task: true, plan: true, edit: false },
        };
        const serialized = serializeFrontmatter(original);
        const { frontmatter } = parseFrontmatter(serialized + "\n\nBody");
        assert.ok(frontmatter);
        assert.equal(frontmatter.id, original.id);
        assert.deepEqual(frontmatter.keywords, original.keywords);
        assert.deepEqual(frontmatter.patterns, original.patterns);
        assert.equal(frontmatter.priority, original.priority);
        assert.deepEqual(frontmatter.triggers, original.triggers);
    });
    it("omits empty patterns", () => {
        const fm = {
            id: "test",
            keywords: ["x"],
            patterns: [],
            priority: "core",
            triggers: { task: true, plan: true, edit: false },
        };
        const serialized = serializeFrontmatter(fm);
        assert.ok(!serialized.includes("patterns:"));
    });
});
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export {};
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
// Unit tests (node:test runner) for matchLoadout/lookupEntry.
import { describe, it } from "node:test";
import assert from "node:assert/strict";
import { matchLoadout, lookupEntry } from "../match.js";
import { DEFAULT_TRIGGERS } from "../types.js";
// Build a minimal LoadoutIndex fixture; each arg supplies id/keywords
// plus optional patterns/priority, the rest gets placeholder values.
function makeIndex(...entries) {
    return {
        version: "1.0.0",
        generated: new Date().toISOString(),
        entries: entries.map((e) => ({
            id: e.id,
            path: `.rules/${e.id}.md`,
            keywords: e.keywords,
            patterns: e.patterns ?? [],
            priority: e.priority ?? "domain",
            summary: `Summary for ${e.id}`,
            triggers: { ...DEFAULT_TRIGGERS },
            tokens_est: 100,
            lines: 10,
        })),
        budget: {
            always_loaded_est: 0,
            on_demand_total_est: 0,
            avg_task_load_est: 0,
            avg_task_load_observed: null,
        },
    };
}
describe("matchLoadout", () => {
    it("always includes core entries", () => {
        const index = makeIndex({ id: "core-rule", keywords: [], priority: "core" }, { id: "domain-rule", keywords: ["ci"], priority: "domain" });
        const results = matchLoadout("unrelated task", index);
        assert.equal(results.length, 1);
        assert.equal(results[0].entry.id, "core-rule");
        assert.equal(results[0].score, 1.0);
    });
    it("never auto-includes manual entries", () => {
        const index = makeIndex({ id: "manual-rule", keywords: ["ci", "deploy"], priority: "manual" });
        const results = matchLoadout("fix the ci pipeline and deploy", index);
        assert.equal(results.length, 0);
    });
    it("matches domain entries by keywords", () => {
        const index = makeIndex({ id: "ci", keywords: ["ci", "workflow", "runner"] }, { id: "shipping", keywords: ["publish", "release", "npm"] });
        const results = matchLoadout("update the ci workflow", index);
        assert.equal(results.length, 1);
        assert.equal(results[0].entry.id, "ci");
        assert.ok(results[0].matchedKeywords.includes("ci"));
        assert.ok(results[0].matchedKeywords.includes("workflow"));
    });
    it("scores by keyword overlap proportion", () => {
        const index = makeIndex({ id: "narrow", keywords: ["ci", "workflow", "runner", "matrix", "dependabot"] }, { id: "broad", keywords: ["ci", "workflow"] });
        const results = matchLoadout("fix the ci workflow", index);
        // "broad" has 2/2 match (1.0), "narrow" has 2/5 match (0.4)
        assert.equal(results[0].entry.id, "broad");
        assert.ok(results[0].score > results[1].score);
    });
    it("gives pattern bonus", () => {
        // Use entries where keyword match is partial so the 0.2 bonus is visible
        const index = makeIndex({ id: "with-pattern", keywords: ["ci", "workflow", "runner"], patterns: ["ci_pipeline"] }, { id: "without-pattern", keywords: ["ci", "workflow", "runner"] });
        // Task matches 1/3 keywords (0.33) + pattern bonus (0.2) = 0.53 vs 0.33
        const results = matchLoadout("fix the ci pipeline", index);
        const withPattern = results.find((r) => r.entry.id === "with-pattern");
        const without = results.find((r) => r.entry.id === "without-pattern");
        assert.ok(withPattern.score > without.score);
    });
    it("returns empty for no matches", () => {
        const index = makeIndex({ id: "ci", keywords: ["ci", "workflow"] });
        const results = matchLoadout("update the readme", index);
        assert.equal(results.length, 0);
    });
    it("sorts by score descending", () => {
        const index = makeIndex({ id: "low", keywords: ["ci", "workflow", "runner", "matrix", "dependabot"] }, { id: "high", keywords: ["ci"] });
        const results = matchLoadout("fix ci", index);
        assert.equal(results[0].entry.id, "high");
    });
});
describe("lookupEntry", () => {
    it("finds entry by id", () => {
        const index = makeIndex({ id: "ci", keywords: ["ci"] }, { id: "shipping", keywords: ["npm"] });
        const entry = lookupEntry("shipping", index);
        assert.ok(entry);
        assert.equal(entry.id, "shipping");
    });
    it("returns undefined for missing id", () => {
        const index = makeIndex({ id: "ci", keywords: ["ci"] });
        const entry = lookupEntry("nope", index);
        assert.equal(entry, undefined);
    });
});
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export {};
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
// Unit tests (node:test runner) for the estimateTokens heuristic.
// The expectations imply the estimator rounds up (ceil of chars/4).
import { describe, it } from "node:test";
import assert from "node:assert/strict";
import { estimateTokens } from "../tokens.js";
describe("estimateTokens", () => {
    it("estimates ~1 token per 4 chars", () => {
        assert.equal(estimateTokens("abcd"), 1);
        assert.equal(estimateTokens("abcde"), 2);
        assert.equal(estimateTokens(""), 0);
    });
    it("handles long text", () => {
        const text = "a".repeat(400);
        assert.equal(estimateTokens(text), 100);
    });
});
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export {};
|
|
@@ -0,0 +1,92 @@
|
|
|
1
|
+
import { describe, it } from "node:test";
|
|
2
|
+
import assert from "node:assert/strict";
|
|
3
|
+
import { validateIndex } from "../validate.js";
|
|
4
|
+
import { DEFAULT_TRIGGERS } from "../types.js";
|
|
5
|
+
// Build a structurally valid LoadoutEntry; pass overrides to break
// individual fields for negative-path test cases.
function makeEntry(overrides = {}) {
  const base = {
    id: "test-rule",
    path: ".rules/test-rule.md",
    keywords: ["test"],
    patterns: [],
    priority: "domain",
    summary: "A test rule",
    triggers: { ...DEFAULT_TRIGGERS },
    tokens_est: 100,
    lines: 10,
  };
  return { ...base, ...overrides };
}
|
|
19
|
+
// Wrap a list of entries in a minimal but complete LoadoutIndex,
// including a fixed budget block so validation has something to check.
function makeIndex(entries) {
  const budget = {
    always_loaded_est: 100,
    on_demand_total_est: 200,
    avg_task_load_est: 100,
    avg_task_load_observed: null,
  };
  return {
    version: "1.0.0",
    generated: new Date().toISOString(),
    entries,
    budget,
  };
}
|
|
32
|
+
describe("validateIndex", () => {
|
|
33
|
+
it("passes clean index", () => {
|
|
34
|
+
const index = makeIndex([makeEntry()]);
|
|
35
|
+
const issues = validateIndex(index);
|
|
36
|
+
assert.equal(issues.length, 0);
|
|
37
|
+
});
|
|
38
|
+
it("catches missing version", () => {
|
|
39
|
+
const index = makeIndex([makeEntry()]);
|
|
40
|
+
index.version = "";
|
|
41
|
+
const issues = validateIndex(index);
|
|
42
|
+
assert.ok(issues.some((i) => i.code === "MISSING_VERSION"));
|
|
43
|
+
});
|
|
44
|
+
it("catches duplicate IDs", () => {
|
|
45
|
+
const index = makeIndex([
|
|
46
|
+
makeEntry({ id: "dupe" }),
|
|
47
|
+
makeEntry({ id: "dupe" }),
|
|
48
|
+
]);
|
|
49
|
+
const issues = validateIndex(index);
|
|
50
|
+
assert.ok(issues.some((i) => i.code === "DUPLICATE_ID"));
|
|
51
|
+
});
|
|
52
|
+
it("catches missing summary", () => {
|
|
53
|
+
const index = makeIndex([makeEntry({ summary: "" })]);
|
|
54
|
+
const issues = validateIndex(index);
|
|
55
|
+
assert.ok(issues.some((i) => i.code === "MISSING_SUMMARY"));
|
|
56
|
+
});
|
|
57
|
+
it("warns on long summary", () => {
|
|
58
|
+
const index = makeIndex([makeEntry({ summary: "x".repeat(121) })]);
|
|
59
|
+
const issues = validateIndex(index);
|
|
60
|
+
assert.ok(issues.some((i) => i.code === "LONG_SUMMARY"));
|
|
61
|
+
});
|
|
62
|
+
it("catches empty keywords on domain entries", () => {
|
|
63
|
+
const index = makeIndex([makeEntry({ priority: "domain", keywords: [] })]);
|
|
64
|
+
const issues = validateIndex(index);
|
|
65
|
+
assert.ok(issues.some((i) => i.code === "EMPTY_KEYWORDS"));
|
|
66
|
+
});
|
|
67
|
+
it("allows empty keywords on manual entries", () => {
|
|
68
|
+
const index = makeIndex([makeEntry({ priority: "manual", keywords: [] })]);
|
|
69
|
+
const issues = validateIndex(index);
|
|
70
|
+
assert.ok(!issues.some((i) => i.code === "EMPTY_KEYWORDS"));
|
|
71
|
+
});
|
|
72
|
+
it("catches missing path", () => {
|
|
73
|
+
const index = makeIndex([makeEntry({ path: "" })]);
|
|
74
|
+
const issues = validateIndex(index);
|
|
75
|
+
assert.ok(issues.some((i) => i.code === "MISSING_PATH"));
|
|
76
|
+
});
|
|
77
|
+
it("warns on non-kebab-case ID", () => {
|
|
78
|
+
const index = makeIndex([makeEntry({ id: "NotKebab" })]);
|
|
79
|
+
const issues = validateIndex(index);
|
|
80
|
+
assert.ok(issues.some((i) => i.code === "BAD_ID_FORMAT"));
|
|
81
|
+
});
|
|
82
|
+
it("catches invalid priority", () => {
|
|
83
|
+
const index = makeIndex([makeEntry({ priority: "bogus" })]);
|
|
84
|
+
const issues = validateIndex(index);
|
|
85
|
+
assert.ok(issues.some((i) => i.code === "INVALID_PRIORITY"));
|
|
86
|
+
});
|
|
87
|
+
it("warns on negative token estimate", () => {
|
|
88
|
+
const index = makeIndex([makeEntry({ tokens_est: -5 })]);
|
|
89
|
+
const issues = validateIndex(index);
|
|
90
|
+
assert.ok(issues.some((i) => i.code === "BAD_TOKEN_EST"));
|
|
91
|
+
});
|
|
92
|
+
});
|
package/dist/tokens.d.ts
ADDED
package/dist/tokens.js
ADDED
package/dist/types.d.ts
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
export type Priority = "core" | "domain" | "manual";
|
|
2
|
+
export interface Triggers {
|
|
3
|
+
task: boolean;
|
|
4
|
+
plan: boolean;
|
|
5
|
+
edit: boolean;
|
|
6
|
+
}
|
|
7
|
+
export interface LoadoutEntry {
|
|
8
|
+
id: string;
|
|
9
|
+
path: string;
|
|
10
|
+
keywords: string[];
|
|
11
|
+
patterns: string[];
|
|
12
|
+
priority: Priority;
|
|
13
|
+
summary: string;
|
|
14
|
+
triggers: Triggers;
|
|
15
|
+
tokens_est: number;
|
|
16
|
+
lines: number;
|
|
17
|
+
}
|
|
18
|
+
export interface Budget {
|
|
19
|
+
always_loaded_est: number;
|
|
20
|
+
on_demand_total_est: number;
|
|
21
|
+
avg_task_load_est: number;
|
|
22
|
+
avg_task_load_observed: number | null;
|
|
23
|
+
}
|
|
24
|
+
export interface LoadoutIndex {
|
|
25
|
+
version: string;
|
|
26
|
+
generated: string;
|
|
27
|
+
entries: LoadoutEntry[];
|
|
28
|
+
budget: Budget;
|
|
29
|
+
}
|
|
30
|
+
export interface Frontmatter {
|
|
31
|
+
id: string;
|
|
32
|
+
keywords: string[];
|
|
33
|
+
patterns: string[];
|
|
34
|
+
priority: Priority;
|
|
35
|
+
triggers: Triggers;
|
|
36
|
+
}
|
|
37
|
+
export interface MatchResult {
|
|
38
|
+
entry: LoadoutEntry;
|
|
39
|
+
score: number;
|
|
40
|
+
matchedKeywords: string[];
|
|
41
|
+
matchedPatterns: string[];
|
|
42
|
+
}
|
|
43
|
+
export type IssueSeverity = "error" | "warning";
|
|
44
|
+
export interface ValidationIssue {
|
|
45
|
+
severity: IssueSeverity;
|
|
46
|
+
code: string;
|
|
47
|
+
message: string;
|
|
48
|
+
entryId?: string;
|
|
49
|
+
}
|
|
50
|
+
export declare const DEFAULT_TRIGGERS: Triggers;
|
package/dist/types.js
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Index validator.
|
|
3
|
+
*
|
|
4
|
+
* Validates the structural integrity of a LoadoutIndex.
|
|
5
|
+
* Does NOT check the filesystem — that's the consumer's job
|
|
6
|
+
* (e.g. claude-rules checks that files exist on disk).
|
|
7
|
+
*
|
|
8
|
+
* This validates the data model only:
|
|
9
|
+
* - Required fields present
|
|
10
|
+
* - IDs unique and kebab-case
|
|
11
|
+
* - Summaries present and bounded
|
|
12
|
+
* - Domain entries have keywords
|
|
13
|
+
* - No empty arrays where content is expected
|
|
14
|
+
* - Budget numbers are non-negative
|
|
15
|
+
*/
|
|
16
|
+
import type { LoadoutIndex, ValidationIssue } from "./types.js";
|
|
17
|
+
export declare function validateIndex(index: LoadoutIndex): ValidationIssue[];
|
package/dist/validate.js
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Index validator.
|
|
3
|
+
*
|
|
4
|
+
* Validates the structural integrity of a LoadoutIndex.
|
|
5
|
+
* Does NOT check the filesystem — that's the consumer's job
|
|
6
|
+
* (e.g. claude-rules checks that files exist on disk).
|
|
7
|
+
*
|
|
8
|
+
* This validates the data model only:
|
|
9
|
+
* - Required fields present
|
|
10
|
+
* - IDs unique and kebab-case
|
|
11
|
+
* - Summaries present and bounded
|
|
12
|
+
* - Domain entries have keywords
|
|
13
|
+
* - No empty arrays where content is expected
|
|
14
|
+
* - Budget numbers are non-negative
|
|
15
|
+
*/
|
|
16
|
+
// ID format accepted without warning: lowercase alphanumeric words joined by "-".
const KEBAB_RE = /^[a-z0-9]+(-[a-z0-9]+)*$/;
const VALID_PRIORITIES = new Set(["core", "domain", "manual"]);

/**
 * Validate the structural integrity of a LoadoutIndex (data model only;
 * filesystem existence is the consumer's job — see the file header).
 *
 * @param {import("./types.js").LoadoutIndex} index - Index to lint.
 * @returns {import("./types.js").ValidationIssue[]} All issues found; empty when clean.
 */
export function validateIndex(index) {
  const issues = [];

  // ---- Index-level fields -------------------------------------------------
  if (!index.version) {
    issues.push({
      severity: "error",
      code: "MISSING_VERSION",
      message: "Index is missing a version field",
    });
  }
  if (!index.generated) {
    issues.push({
      severity: "warning",
      code: "MISSING_GENERATED",
      message: "Index is missing a generated timestamp",
    });
  }
  if (!Array.isArray(index.entries)) {
    issues.push({
      severity: "error",
      code: "INVALID_ENTRIES",
      message: "Index entries must be an array",
    });
    return issues; // can't continue without an entries array
  }

  // ---- Per-entry validation -----------------------------------------------
  const ids = new Set();
  for (const entry of index.entries) {
    // ID required — later messages reference it, so skip the rest if absent.
    if (!entry.id) {
      issues.push({
        severity: "error",
        code: "MISSING_ID",
        message: "Entry is missing an id field",
      });
      continue;
    }
    if (!KEBAB_RE.test(entry.id)) {
      issues.push({
        severity: "warning",
        code: "BAD_ID_FORMAT",
        message: `ID "${entry.id}" is not kebab-case`,
        entryId: entry.id,
      });
    }
    if (ids.has(entry.id)) {
      issues.push({
        severity: "error",
        code: "DUPLICATE_ID",
        message: `Duplicate entry ID: "${entry.id}"`,
        entryId: entry.id,
      });
    }
    ids.add(entry.id);
    if (!entry.path) {
      issues.push({
        severity: "error",
        code: "MISSING_PATH",
        message: `Entry "${entry.id}" has no path`,
        entryId: entry.id,
      });
    }
    if (!VALID_PRIORITIES.has(entry.priority)) {
      issues.push({
        severity: "error",
        code: "INVALID_PRIORITY",
        message: `Entry "${entry.id}" has invalid priority "${entry.priority}"`,
        entryId: entry.id,
      });
    }
    // Summary required and bounded (long summaries bloat the dispatch table).
    if (!entry.summary) {
      issues.push({
        severity: "error",
        code: "MISSING_SUMMARY",
        message: `Entry "${entry.id}" has no summary`,
        entryId: entry.id,
      });
    }
    else if (entry.summary.length > 120) {
      issues.push({
        severity: "warning",
        code: "LONG_SUMMARY",
        message: `Entry "${entry.id}" summary exceeds 120 chars (${entry.summary.length})`,
        entryId: entry.id,
      });
    }
    // Domain entries are keyword-routed; without keywords they can never load.
    if (entry.priority === "domain" && (!entry.keywords || entry.keywords.length === 0)) {
      issues.push({
        severity: "error",
        code: "EMPTY_KEYWORDS",
        message: `Domain entry "${entry.id}" has no keywords — cannot be routed`,
        entryId: entry.id,
      });
    }
    // Token estimate must be a real non-negative number.
    // Number.isNaN guard fixes a hole where NaN passed (NaN < 0 is false).
    if (typeof entry.tokens_est !== "number" || Number.isNaN(entry.tokens_est) || entry.tokens_est < 0) {
      issues.push({
        severity: "warning",
        code: "BAD_TOKEN_EST",
        message: `Entry "${entry.id}" has invalid token estimate: ${entry.tokens_est}`,
        entryId: entry.id,
      });
    }
  }

  // ---- Budget validation --------------------------------------------------
  if (index.budget) {
    // All budget estimates are token counts and must be non-negative.
    // avg_task_load_est was previously unchecked despite the header's
    // "Budget numbers are non-negative" contract — now included.
    for (const field of ["always_loaded_est", "on_demand_total_est", "avg_task_load_est"]) {
      if (index.budget[field] < 0) {
        issues.push({
          severity: "warning",
          code: "NEGATIVE_BUDGET",
          message: `${field} is negative`,
        });
      }
    }
  }
  return issues;
}
|
package/package.json
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@mcptoolshop/ai-loadout",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "Context-aware knowledge router for AI agents. Dispatch table, frontmatter spec, keyword matcher, token estimator.",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"main": "dist/index.js",
|
|
7
|
+
"types": "dist/index.d.ts",
|
|
8
|
+
"exports": {
|
|
9
|
+
".": {
|
|
10
|
+
"types": "./dist/index.d.ts",
|
|
11
|
+
"import": "./dist/index.js"
|
|
12
|
+
}
|
|
13
|
+
},
|
|
14
|
+
"scripts": {
|
|
15
|
+
"build": "tsc",
|
|
16
|
+
"verify": "tsc --noEmit && node --test dist/tests/*.test.js",
|
|
17
|
+
"test": "node --test dist/tests/*.test.js",
|
|
18
|
+
"prepublishOnly": "npm run build"
|
|
19
|
+
},
|
|
20
|
+
"files": [
|
|
21
|
+
"dist",
|
|
22
|
+
"README.md",
|
|
23
|
+
"CHANGELOG.md",
|
|
24
|
+
"LICENSE"
|
|
25
|
+
],
|
|
26
|
+
"keywords": [
|
|
27
|
+
"ai",
|
|
28
|
+
"agent",
|
|
29
|
+
"loadout",
|
|
30
|
+
"knowledge",
|
|
31
|
+
"routing",
|
|
32
|
+
"dispatch",
|
|
33
|
+
"context",
|
|
34
|
+
"frontmatter",
|
|
35
|
+
"mcp"
|
|
36
|
+
],
|
|
37
|
+
"author": "mcp-tool-shop",
|
|
38
|
+
"license": "MIT",
|
|
39
|
+
"engines": {
|
|
40
|
+
"node": ">=20"
|
|
41
|
+
},
|
|
42
|
+
"devDependencies": {
|
|
43
|
+
"@types/node": "^22.0.0",
|
|
44
|
+
"typescript": "^5.7.0"
|
|
45
|
+
}
|
|
46
|
+
}
|