loreli 0.0.0 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (104) hide show
  1. package/LICENSE +1 -1
  2. package/README.md +710 -97
  3. package/bin/loreli.js +89 -0
  4. package/package.json +77 -14
  5. package/packages/README.md +101 -0
  6. package/packages/action/README.md +98 -0
  7. package/packages/action/prompts/action.md +172 -0
  8. package/packages/action/src/index.js +684 -0
  9. package/packages/agent/README.md +606 -0
  10. package/packages/agent/src/backends/claude.js +387 -0
  11. package/packages/agent/src/backends/codex.js +351 -0
  12. package/packages/agent/src/backends/cursor.js +371 -0
  13. package/packages/agent/src/backends/index.js +486 -0
  14. package/packages/agent/src/base.js +138 -0
  15. package/packages/agent/src/cli.js +275 -0
  16. package/packages/agent/src/discover.js +396 -0
  17. package/packages/agent/src/factory.js +124 -0
  18. package/packages/agent/src/index.js +12 -0
  19. package/packages/agent/src/models.js +159 -0
  20. package/packages/agent/src/output.js +62 -0
  21. package/packages/agent/src/session.js +162 -0
  22. package/packages/agent/src/trace.js +186 -0
  23. package/packages/classify/README.md +136 -0
  24. package/packages/classify/prompts/blocker.md +12 -0
  25. package/packages/classify/prompts/feedback.md +14 -0
  26. package/packages/classify/prompts/pane-state.md +20 -0
  27. package/packages/classify/src/index.js +81 -0
  28. package/packages/config/README.md +898 -0
  29. package/packages/config/src/defaults.js +145 -0
  30. package/packages/config/src/index.js +223 -0
  31. package/packages/config/src/schema.js +291 -0
  32. package/packages/config/src/validate.js +160 -0
  33. package/packages/context/README.md +165 -0
  34. package/packages/context/src/index.js +198 -0
  35. package/packages/hub/README.md +338 -0
  36. package/packages/hub/src/base.js +154 -0
  37. package/packages/hub/src/github.js +1597 -0
  38. package/packages/hub/src/index.js +79 -0
  39. package/packages/hub/src/labels.js +48 -0
  40. package/packages/identity/README.md +288 -0
  41. package/packages/identity/src/index.js +620 -0
  42. package/packages/identity/src/themes/avatar.js +217 -0
  43. package/packages/identity/src/themes/digimon.js +217 -0
  44. package/packages/identity/src/themes/dragonball.js +217 -0
  45. package/packages/identity/src/themes/lotr.js +217 -0
  46. package/packages/identity/src/themes/marvel.js +217 -0
  47. package/packages/identity/src/themes/pokemon.js +217 -0
  48. package/packages/identity/src/themes/starwars.js +217 -0
  49. package/packages/identity/src/themes/transformers.js +217 -0
  50. package/packages/identity/src/themes/zelda.js +217 -0
  51. package/packages/knowledge/README.md +217 -0
  52. package/packages/knowledge/src/index.js +243 -0
  53. package/packages/log/README.md +93 -0
  54. package/packages/log/src/index.js +252 -0
  55. package/packages/marker/README.md +200 -0
  56. package/packages/marker/src/index.js +184 -0
  57. package/packages/mcp/README.md +323 -0
  58. package/packages/mcp/instructions.md +126 -0
  59. package/packages/mcp/scaffolding/.agents/skills/loreli-context/SKILL.md +89 -0
  60. package/packages/mcp/scaffolding/ISSUE_TEMPLATE/config.yml +2 -0
  61. package/packages/mcp/scaffolding/ISSUE_TEMPLATE/loreli.yml +83 -0
  62. package/packages/mcp/scaffolding/loreli.yml +491 -0
  63. package/packages/mcp/scaffolding/mcp-configs/.codex/config.toml +4 -0
  64. package/packages/mcp/scaffolding/mcp-configs/.cursor/mcp.json +14 -0
  65. package/packages/mcp/scaffolding/mcp-configs/.mcp.json +14 -0
  66. package/packages/mcp/scaffolding/pull-request.md +23 -0
  67. package/packages/mcp/src/index.js +600 -0
  68. package/packages/mcp/src/tools/agent-context.js +44 -0
  69. package/packages/mcp/src/tools/agents.js +450 -0
  70. package/packages/mcp/src/tools/context.js +200 -0
  71. package/packages/mcp/src/tools/github.js +1163 -0
  72. package/packages/mcp/src/tools/hitl.js +162 -0
  73. package/packages/mcp/src/tools/index.js +18 -0
  74. package/packages/mcp/src/tools/refactor.js +227 -0
  75. package/packages/mcp/src/tools/repo.js +44 -0
  76. package/packages/mcp/src/tools/start.js +904 -0
  77. package/packages/mcp/src/tools/status.js +149 -0
  78. package/packages/mcp/src/tools/work.js +134 -0
  79. package/packages/orchestrator/README.md +192 -0
  80. package/packages/orchestrator/src/index.js +1492 -0
  81. package/packages/planner/README.md +251 -0
  82. package/packages/planner/prompts/plan-reviewer.md +109 -0
  83. package/packages/planner/prompts/planner.md +191 -0
  84. package/packages/planner/prompts/tiebreaker-reviewer.md +71 -0
  85. package/packages/planner/src/index.js +1381 -0
  86. package/packages/review/README.md +129 -0
  87. package/packages/review/prompts/reviewer.md +158 -0
  88. package/packages/review/src/index.js +1403 -0
  89. package/packages/risk/README.md +178 -0
  90. package/packages/risk/prompts/risk.md +272 -0
  91. package/packages/risk/src/index.js +439 -0
  92. package/packages/session/README.md +165 -0
  93. package/packages/session/src/index.js +215 -0
  94. package/packages/test-utils/README.md +96 -0
  95. package/packages/test-utils/src/index.js +354 -0
  96. package/packages/tmux/README.md +261 -0
  97. package/packages/tmux/src/index.js +501 -0
  98. package/packages/workflow/README.md +317 -0
  99. package/packages/workflow/prompts/preamble.md +14 -0
  100. package/packages/workflow/src/index.js +660 -0
  101. package/packages/workflow/src/proof-of-life.js +74 -0
  102. package/packages/workspace/README.md +143 -0
  103. package/packages/workspace/src/index.js +1127 -0
  104. package/index.js +0 -8
@@ -0,0 +1,186 @@
1
+ /**
2
+ * Agent trace formatting and token parsing utilities.
3
+ *
4
+ * Produces marker-wrapped `<details>` blocks for embedding agent
5
+ * reasoning, terminal output, and token usage in PR bodies and
6
+ * review comments. Uses the loreli marker system for programmatic
7
+ * detection and stripping.
8
+ *
9
+ * @module loreli/agent/trace
10
+ */
11
+
12
+ import { mark } from 'loreli/marker';
13
+
14
/**
 * Known token-usage line formats emitted by agent CLIs.
 *
 * Each entry pairs a `pattern` regex with an `extract` function that
 * turns the regex match into `{ input, output }` counts. Supporting a
 * new backend (or an updated CLI version) means appending an entry
 * here — `tokens()` itself never changes.
 *
 * @type {Array<{name: string, pattern: RegExp, extract: function}>}
 */
const TOKEN_PATTERNS = [
  {
    name: 'codex',
    pattern: /tokens?\s+used\s*\n?\s*(\d[\d,]*)/i,
    // Codex prints one combined count; report it as output-only.
    extract: (match) => ({
      input: 0,
      output: parseInt(match[1].replaceAll(',', ''), 10)
    })
  },
  {
    name: 'codex-split',
    pattern: /input[:\s]+(\d[\d,]*)\s*(?:tokens?)?\s*[,|/]\s*output[:\s]+(\d[\d,]*)/i,
    extract: (match) => ({
      input: parseInt(match[1].replaceAll(',', ''), 10),
      output: parseInt(match[2].replaceAll(',', ''), 10)
    })
  },
  {
    name: 'claude',
    pattern: /total\s+tokens?[:\s]+input\s*=\s*(\d[\d,]*)\s+output\s*=\s*(\d[\d,]*)/i,
    extract: (match) => ({
      input: parseInt(match[1].replaceAll(',', ''), 10),
      output: parseInt(match[2].replaceAll(',', ''), 10)
    })
  }
];

/**
 * Parse token usage from CLI terminal output.
 *
 * Tries each registered pattern in order and returns the first hit.
 * A `null` return means no recognized pattern matched — expected for
 * backends (e.g. cursor-agent) that never print token info.
 *
 * @param {string} text - Cleaned terminal output.
 * @returns {{ input: number, output: number } | null}
 */
export function tokens(text) {
  if (!text) return null;
  for (const { pattern, extract } of TOKEN_PATTERNS) {
    const match = text.match(pattern);
    if (match) return extract(match);
  }
  return null;
}
76
+
77
/**
 * Escape captured output so it cannot interfere with the loreli
 * marker system or break markdown rendering.
 *
 * Removes loreli marker comments (`<!-- loreli:... -->`) and ASCII
 * control characters other than newline and tab.
 *
 * @param {string} text - Raw captured output.
 * @returns {string} Sanitized text safe for embedding in code fences.
 */
export function escape(text) {
  if (!text) return '';
  const withoutMarkers = text.replace(/<!-- loreli:[^>]*-->/g, '');
  // eslint-disable-next-line no-control-regex
  return withoutMarkers.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F]/g, '');
}
95
+
96
/**
 * Compute the fence length needed so a code fence cannot be broken
 * by backtick runs inside the content.
 *
 * @param {string} text - Content to be fenced.
 * @returns {number} Fence length (at least 3).
 */
function fenceLength(text) {
  let longest = 0;
  for (const run of text.matchAll(/`+/g)) {
    longest = Math.max(longest, run[0].length);
  }
  return Math.max(3, longest + 1);
}
112
+
113
/**
 * Format a complete agent trace block.
 *
 * Builds a `<details>` section wrapped in `loreli:trace` /
 * `loreli:trace-end` markers so downstream consumers can strip it via
 * `excise(body, 'trace')`. Suitable for embedding in PR bodies or
 * review comments.
 *
 * Sections are included only when their data is present:
 * - `reasoning` — agent-provided summary of approach
 * - `output` — captured terminal output (in a code fence)
 * - `usage` — token counts from `tokens()`
 *
 * @param {string} name - Agent identity name (e.g. 'bumblebee-0').
 * @param {object} data - Trace data.
 * @param {string} [data.reasoning] - Agent-provided reasoning summary.
 * @param {string} [data.output] - Captured terminal output.
 * @param {{ input: number, output: number } | null} [data.usage] - Token usage.
 * @param {string} [data.model] - Model identifier for the usage table.
 * @param {number} [data.duration] - Duration in milliseconds.
 * @returns {string} Complete trace block with markers, or '' when there is nothing to report.
 */
export function format(name, data = {}) {
  const { reasoning, output: raw, usage, model, duration } = data;
  const sections = [];

  if (reasoning) {
    sections.push('### Reasoning\n', reasoning);
  }

  if (raw) {
    const cleaned = escape(raw);
    // Fence must be longer than any backtick run inside the content.
    const fence = '`'.repeat(fenceLength(cleaned));
    sections.push('### Output\n', `${fence}\n${cleaned}\n${fence}`);
  }

  if (usage || model || duration != null) {
    const rows = [];
    if (usage?.input != null) rows.push(`| Input tokens | ${usage.input.toLocaleString()} |`);
    if (usage?.output != null) rows.push(`| Output tokens | ${usage.output.toLocaleString()} |`);
    if (model) rows.push(`| Model | ${model} |`);
    if (duration != null) {
      const totalSecs = Math.round(duration / 1000);
      const mins = Math.floor(totalSecs / 60);
      const display = mins > 0 ? `${mins}m ${totalSecs % 60}s` : `${totalSecs}s`;
      rows.push(`| Duration | ${display} |`);
    }
    if (rows.length) {
      sections.push('### Usage\n', `| Metric | Value |\n|--------|-------|\n${rows.join('\n')}`);
    }
  }

  if (!sections.length) return '';

  return [
    mark('trace', { agent: name }),
    '<details>',
    `<summary>Agent Trace (${name})</summary>`,
    '',
    sections.join('\n\n'),
    '',
    '</details>',
    '<!-- loreli:trace-end -->'
  ].join('\n');
}
@@ -0,0 +1,136 @@
1
+ # loreli/classify
2
+
3
+ Prompt-driven LLM classification. Loads a named Mustache template from disk, renders it with the provided content and variables, sends the result through `backends.oneshot()`, and returns the parsed JSON response. The prompt template defines the response shape — classify is generic plumbing.
4
+
5
+ ## Installation
6
+
7
+ Part of the Loreli monorepo. Import via the package exports map:
8
+
9
+ ```js
10
+ import { classify } from 'loreli/classify';
11
+ ```
12
+
13
+ ## Quick Start
14
+
15
+ Classification requires a `BackendRegistry` with at least one available LLM backend and a prompt template in `packages/classify/prompts/`:
16
+
17
+ ```js
18
+ import { classify } from 'loreli/classify';
19
+
20
+ const result = await classify('pane-state', paneOutput, {
21
+ backends: backendRegistry
22
+ });
23
+ // => { category: 'option_dialog', reasoning: 'Trust dialog detected', confidence: 0.9 }
24
+ ```
25
+
26
+ The first argument is the template name — it resolves to `prompts/<name>.md` inside the package. The second argument is the text to classify. The template defines what categories exist, what JSON shape to return, and how the LLM should reason about the input.
27
+
28
+ ## How It Works
29
+
30
+ ```text
31
+ classify('pane-state', text, opts)
32
+
33
+ ├─ Load prompts/pane-state.md
34
+ ├─ Mustache.render(template, { content: text, ...opts.vars })
35
+ ├─ backends.oneshot(rendered, { model, timeout })
36
+ └─ Parse JSON from LLM response → return object
37
+ ```
38
+
39
+ 1. **Load** — Reads `prompts/<name>.md` from disk.
40
+ 2. **Render** — Runs Mustache templating. The content is available as `{{{content}}}` (triple-stache, unescaped). Extra variables via `opts.vars` are also available.
41
+ 3. **Send** — Calls `backends.oneshot()` with the rendered prompt.
42
+ 4. **Parse** — Extracts the first `{...}` JSON object from the response, handling markdown fences and preamble text.
43
+
44
+ ## Prompt Templates
45
+
46
+ Templates are Markdown files in `packages/classify/prompts/`. Each template contains the full classification instructions for the LLM, including category definitions and the expected JSON output shape.
47
+
48
+ ### `pane-state.md` — orchestrator stall detection
49
+
50
+ Used by the orchestrator's monitor loop and rapid-death detector to diagnose agent state. Returns `{category, reasoning, confidence}` with states: `working`, `waiting_for_input`, `option_dialog`, `error_loop`, `idle`, `fatal`, `dead`. The `dead` state identifies agents whose process exited or crashed — the orchestrator uses it in the rapid-death window (first 15s after spawn) to classify *why* an agent died, replacing the old binary alive/dead check with diagnostic context.
51
+
52
+ ### `feedback.md` — knowledge feedback categorization
53
+
54
+ Used by `loreli/knowledge` to classify review feedback. Returns `{category, reasoning, confidence}` with categories: `naming`, `architecture`, `testing`, `documentation`, `performance`, `security`.
55
+
56
+ ### `blocker.md` — knowledge per-ref blocker detection
57
+
58
+ Used by `loreli/knowledge` to classify issue/PR references as blockers or informational. Receives `{{{content}}}` (joined discussion text) and `{{refs}}` (formatted ref list). Returns `{blockers: [number], references: [number]}`.
59
+
60
+ ### Writing a new template
61
+
62
+ Create a new `.md` file in `prompts/`. Use `{{{content}}}` for the text to classify (triple-stache prevents HTML escaping). Use `{{varName}}` for additional variables passed via `opts.vars`. End the template with the expected JSON shape so the LLM knows what to return.
63
+
64
+ The following example shows how a severity template might look:
65
+
66
+ ```markdown
67
+ Classify this error message by severity.
68
+
69
+ Levels:
70
+ - critical: System is down or data loss is occurring
71
+ - warning: Something is wrong but the system is functional
72
+ - info: Normal operational message
73
+
74
+ Respond with ONLY a JSON object.
75
+ {"level": "<severity>", "reasoning": "<one sentence>"}
76
+
77
+ {{{content}}}
78
+ ```
79
+
80
+ Then call it:
81
+
82
+ ```js
83
+ const result = await classify('severity', errorMessage, { backends });
84
+ // => { level: 'critical', reasoning: 'Database connection lost' }
85
+ ```
86
+
87
+ ## API Reference
88
+
89
+ ### `classify(name, content, opts)`
90
+
91
+ Run a named classification prompt against content via LLM.
92
+
93
+ **Parameters:**
94
+
95
+ | Name | Type | Default | Description |
96
+ |------|------|---------|-------------|
97
+ | `name` | `string` | — | Template name — resolves to `prompts/<name>.md`. |
98
+ | `content` | `string` | — | Text to classify — injected as `{{{content}}}`. |
99
+ | `opts.backends` | `BackendRegistry` | — | **Required.** Backend registry with `oneshot()` method. |
100
+ | `opts.config` | `Config` | `undefined` | Config instance for model/timeout resolution. |
101
+ | `opts.model` | `string` | `'fast'` | Model alias override. Falls back to `config.classify.model`, then `'fast'`. |
102
+ | `opts.timeout` | `number` | `60000` | Timeout in ms. Falls back to `config.classify.timeout`, then `60000`. |
103
+ | `opts.vars` | `object` | `{}` | Extra Mustache variables beyond `content`. |
104
+
105
+ **Returns:** `Promise<object>` — Parsed JSON from the LLM. Shape is defined by the prompt template, not enforced by classify.
106
+
107
+ **Throws:**
108
+
109
+ | Error | Cause |
110
+ |-------|-------|
111
+ | `classify() requires a backends instance` | `opts.backends` is missing or falsy. |
112
+ | `ENOENT` | Template file `prompts/<name>.md` does not exist. |
113
+ | LLM error (propagated) | `backends.oneshot()` threw (timeout, network, etc.). |
114
+ | `classify: LLM response contains no JSON object` | Response had no `{...}` block. |
115
+ | `classify: failed to parse JSON from LLM response` | Found `{...}` but it was not valid JSON. |
116
+
117
+ ## Configuration
118
+
119
+ The classify package reads configuration from the `classify` section in `loreli.yml`:
120
+
121
+ ```yaml
122
+ classify:
123
+ model: fast # Model alias — resolves via backends
124
+ maxLines: 100 # Lines of pane output to capture (used by orchestrator)
125
+ timeout: 30s # Timeout for the oneshot CLI call
126
+ maxRetries: 5 # Consecutive failures before safety-net kill
127
+ ```
128
+
129
+ ## Error Reference
130
+
131
+ | Error | Cause | Resolution |
132
+ |-------|-------|------------|
133
+ | `classify() requires a backends instance` | No `backends` option provided. | Pass a `BackendRegistry` instance in `opts.backends`. |
134
+ | Template `ENOENT` | Named template does not exist in `prompts/`. | Create the template file or check the name for typos. |
135
+ | `LLM response contains no JSON object` | The LLM returned prose without any JSON. | Check the template instructions — they should explicitly ask for JSON-only output. |
136
+ | `failed to parse JSON from LLM response` | JSON was found but was malformed. | Usually a transient LLM issue. Retry or use a more capable model. |
@@ -0,0 +1,12 @@
1
+ Analyze this discussion text and classify each referenced issue/PR number as either a blocking dependency or an informational reference.
2
+
3
+ Referenced issues: {{refs}}
4
+
5
+ A reference is a **blocker** if the text indicates it must be resolved, merged, or closed before this work can proceed (e.g., "blocked by", "depends on", "needs #N merged first", "waiting for #N").
6
+
7
+ A reference is **informational** if it is mentioned for context, background, or related reading (e.g., "see #N for context", "related to #N", "similar to #N").
8
+
9
+ Respond with ONLY a JSON object. Do not wrap in markdown. Do not add any other text.
10
+ {"blockers": [<number>, ...], "references": [<number>, ...]}
11
+
12
+ {{{content}}}
@@ -0,0 +1,14 @@
1
+ Classify this code review feedback into exactly one category.
2
+
3
+ Categories:
4
+ - naming: Feedback about naming conventions, variable names, or renaming suggestions
5
+ - architecture: Feedback about code structure, module organization, or refactoring
6
+ - testing: Feedback about test coverage, assertions, or testing practices
7
+ - documentation: Feedback about documentation, README, JSDoc, or code comments
8
+ - performance: Feedback about performance optimization, memory, or caching
9
+ - security: Feedback about security, secrets, authentication, or vulnerabilities
10
+
11
+ Respond with ONLY a JSON object. Do not wrap in markdown. Do not add any other text.
12
+ {"category": "<name>", "reasoning": "<one sentence explanation>", "confidence": <0.0 to 1.0>}
13
+
14
+ {{{content}}}
@@ -0,0 +1,20 @@
1
+ Classify this terminal output from an AI coding agent into exactly one state.
2
+ {{#model}}
3
+ The agent was launched with model `{{model}}` on the `{{backend}}` backend (role: {{role}}).
4
+ {{/model}}
5
+
6
+ States:
7
+ - working: Agent is mid-task, output is progressing normally
8
+ - waiting_for_input: Agent at a prompt waiting for user input
9
+ - option_dialog: Agent showing a Y/N or selection dialog that needs a keystroke
10
+ - error_loop: Agent repeating the same error without making progress
11
+ - idle: Agent finished all tasks or has no pending work
12
+ - fatal: Agent hit a fatal infrastructure error (rate limit, auth failure, budget exhaustion, invalid model)
13
+ - dead: Agent process exited or crashed — output shows exit code, stack trace, or abrupt termination
14
+
15
+ For option_dialog, include the tmux key names needed to dismiss the dialog in `remedy` (e.g. "Enter", "Down Enter", "Escape"). For all other states, set remedy to null.
16
+
17
+ Respond with ONLY a JSON object. Do not wrap in markdown. Do not add any other text.
18
+ {"category": "<state>", "reasoning": "<one sentence explanation>", "confidence": <0.0 to 1.0>, "remedy": "<tmux keys or null>"}
19
+
20
+ {{{content}}}
@@ -0,0 +1,81 @@
1
+ /**
2
+ * Prompt-driven LLM classification.
3
+ *
4
+ * Loads a named Mustache template from disk, renders it with the provided
5
+ * content and variables, sends the result through `backends.oneshot()`,
6
+ * and returns the parsed JSON response. The prompt template defines the
7
+ * response shape — classify is generic plumbing.
8
+ *
9
+ * @module loreli/classify
10
+ */
11
+
12
+ import { readFile } from 'node:fs/promises';
13
+ import { join, dirname } from 'node:path';
14
+ import { fileURLToPath } from 'node:url';
15
+ import Mustache from 'mustache';
16
+
17
// Resolve this module's directory (ESM has no __dirname built-in).
const __dirname = dirname(fileURLToPath(import.meta.url));
// Prompt templates live one level up from src/, in the package's prompts/ dir.
const prompts = join(__dirname, '..', 'prompts');
19
+
20
/**
 * Extract a JSON object from LLM response text.
 *
 * LLMs sometimes wrap JSON in markdown fences or add preamble text.
 * This pulls the first `{...}` block out of the response (unwrapping
 * a markdown fence first, when present) and parses it.
 *
 * @param {string} raw - Raw LLM response.
 * @returns {object} Parsed JSON object.
 * @throws {Error} When no valid JSON is found in the response.
 */
function extract(raw) {
  const fenced = raw.match(/```(?:json)?\s*([\s\S]*?)```/);
  const candidate = (fenced ? fenced[1] : raw).trim();

  const open = candidate.indexOf('{');
  const close = candidate.lastIndexOf('}');
  if (open === -1 || close === -1) {
    throw new Error('classify: LLM response contains no JSON object');
  }

  try {
    return JSON.parse(candidate.slice(open, close + 1));
  } catch (err) {
    throw new Error(`classify: failed to parse JSON from LLM response — ${err.message}`);
  }
}
46
+
47
/**
 * Run a named classification prompt against content via LLM.
 *
 * Loads `prompts/<name>.md`, renders it with Mustache using `content`
 * and any extra `vars`, sends the rendered prompt through
 * `backends.oneshot()`, and returns the parsed JSON from the response.
 *
 * @param {string} name - Prompt template name (resolves to `prompts/<name>.md`).
 * @param {string} content - Text to classify — injected as `{{{content}}}`.
 * @param {object} opts - Options.
 * @param {object} opts.backends - BackendRegistry instance with a `oneshot()` method. Required.
 * @param {object} [opts.config] - Config instance for model/timeout resolution.
 * @param {string} [opts.model] - Model alias override; falls back to `classify.model` config, then 'fast'.
 * @param {number} [opts.timeout] - Timeout in ms; falls back to `classify.timeout` config, then 60000.
 * @param {object} [opts.vars] - Extra Mustache variables beyond `content`.
 * @returns {Promise<object>} Parsed JSON from the LLM response. Shape is prompt-defined.
 * @throws {Error} When backends is missing, template not found, oneshot fails, or response has no valid JSON.
 */
export async function classify(name, content, opts = {}) {
  const { backends, config, model, timeout, vars } = opts;

  if (!backends) throw new Error('classify() requires a backends instance');

  // readFile throws ENOENT when the named template does not exist.
  const template = await readFile(join(prompts, `${name}.md`), 'utf8');
  const prompt = Mustache.render(template, { content, ...vars });

  const response = await backends.oneshot(prompt, {
    model: model ?? config?.get?.('classify.model') ?? 'fast',
    config,
    timeout: timeout ?? config?.get?.('classify.timeout') ?? 60000
  });

  return extract(response);
}