loreli 1.0.0 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +66 -26
- package/package.json +17 -14
- package/packages/action/prompts/action.md +172 -0
- package/packages/action/src/index.js +33 -5
- package/packages/agent/README.md +107 -18
- package/packages/agent/src/backends/claude.js +111 -11
- package/packages/agent/src/backends/codex.js +78 -5
- package/packages/agent/src/backends/cursor.js +104 -27
- package/packages/agent/src/backends/index.js +162 -5
- package/packages/agent/src/cli.js +80 -3
- package/packages/agent/src/discover.js +396 -0
- package/packages/agent/src/factory.js +39 -34
- package/packages/agent/src/models.js +24 -6
- package/packages/classify/README.md +136 -0
- package/packages/classify/prompts/blocker.md +12 -0
- package/packages/classify/prompts/feedback.md +14 -0
- package/packages/classify/prompts/pane-state.md +20 -0
- package/packages/classify/src/index.js +81 -0
- package/packages/config/README.md +156 -91
- package/packages/config/src/defaults.js +32 -21
- package/packages/config/src/index.js +33 -2
- package/packages/config/src/schema.js +57 -39
- package/packages/hub/src/github.js +59 -20
- package/packages/identity/README.md +1 -1
- package/packages/identity/src/index.js +2 -2
- package/packages/knowledge/README.md +86 -106
- package/packages/knowledge/src/index.js +56 -225
- package/packages/mcp/README.md +51 -7
- package/packages/mcp/instructions.md +6 -1
- package/packages/mcp/scaffolding/loreli.yml +115 -77
- package/packages/mcp/scaffolding/mcp-configs/.codex/config.toml +1 -0
- package/packages/mcp/scaffolding/mcp-configs/.cursor/mcp.json +4 -1
- package/packages/mcp/scaffolding/mcp-configs/.mcp.json +4 -1
- package/packages/mcp/src/index.js +45 -16
- package/packages/mcp/src/tools/agent-context.js +44 -0
- package/packages/mcp/src/tools/agents.js +34 -13
- package/packages/mcp/src/tools/context.js +3 -2
- package/packages/mcp/src/tools/github.js +11 -47
- package/packages/mcp/src/tools/hitl.js +19 -6
- package/packages/mcp/src/tools/index.js +2 -1
- package/packages/mcp/src/tools/refactor.js +227 -0
- package/packages/mcp/src/tools/repo.js +44 -0
- package/packages/mcp/src/tools/start.js +159 -90
- package/packages/mcp/src/tools/status.js +5 -2
- package/packages/mcp/src/tools/work.js +18 -8
- package/packages/orchestrator/src/index.js +345 -79
- package/packages/planner/README.md +84 -1
- package/packages/planner/prompts/plan-reviewer.md +109 -0
- package/packages/planner/prompts/planner.md +191 -0
- package/packages/planner/prompts/tiebreaker-reviewer.md +71 -0
- package/packages/planner/src/index.js +326 -111
- package/packages/review/README.md +2 -2
- package/packages/review/prompts/reviewer.md +158 -0
- package/packages/review/src/index.js +196 -76
- package/packages/risk/README.md +81 -22
- package/packages/risk/prompts/risk.md +272 -0
- package/packages/risk/src/index.js +44 -33
- package/packages/tmux/src/index.js +61 -12
- package/packages/workflow/README.md +18 -14
- package/packages/workflow/prompts/preamble.md +14 -0
- package/packages/workflow/src/index.js +191 -12
- package/packages/workspace/README.md +2 -2
- package/packages/workspace/src/index.js +69 -18
|
@@ -0,0 +1,396 @@
|
|
|
1
|
+
import { execFile as execFileCb } from 'node:child_process';
|
|
2
|
+
import { promisify } from 'node:util';
|
|
3
|
+
import { logger } from 'loreli/log';
|
|
4
|
+
import { defaults } from 'loreli/config';
|
|
5
|
+
|
|
6
|
+
const execFile = promisify(execFileCb);
|
|
7
|
+
const log = logger('discover');
|
|
8
|
+
|
|
9
|
+
/**
 * Proxy-capable backends and their discovery configuration.
 *
 * `baseKey` names the env var holding the proxy base URL for the
 * backend; `keyOrder` lists the env vars tried (in order) as bearer
 * tokens when querying that proxy's model listing. Each backend
 * prefers its native key but falls back to the other provider's key,
 * since a shared proxy may accept either.
 * @type {Array<{backend: string, baseKey: string, keyOrder: string[]}>}
 */
const PROXY_BACKENDS = [
  {
    backend: 'claude',
    baseKey: 'ANTHROPIC_BASE_URL',
    keyOrder: ['ANTHROPIC_API_KEY', 'OPENAI_API_KEY']
  },
  {
    backend: 'codex',
    baseKey: 'OPENAI_BASE_URL',
    keyOrder: ['OPENAI_API_KEY', 'ANTHROPIC_API_KEY']
  }
];

/**
 * Known tiers in priority order (lowest capability first).
 * @type {string[]}
 */
const TIERS = ['fast', 'balanced', 'powerful'];
|
|
27
|
+
|
|
28
|
+
/**
 * Detect the AI provider behind a cursor-agent model ID.
 *
 * @param {string} id - Model identifier.
 * @returns {'openai'|'anthropic'|null} Provider, or null for unsupported models.
 */
function provider(id) {
  const OPENAI_PREFIX = /^(gpt-|o[134]|codex-)/;
  const ANTHROPIC_PREFIX = /^(opus-|sonnet-|haiku-|claude-)/;

  if (OPENAI_PREFIX.test(id)) return 'openai';
  return ANTHROPIC_PREFIX.test(id) ? 'anthropic' : null;
}
|
|
39
|
+
|
|
40
|
+
/**
 * Read an env value with config override precedence.
 *
 * The per-backend config entry wins over the process environment;
 * empty strings are treated as absent in both layers.
 *
 * @param {object|undefined} config - Config instance with `get()`.
 * @param {string} backend - Backend name.
 * @param {string} key - Env key name.
 * @returns {string|undefined} Resolved env value.
 */
function envValue(config, backend, key) {
  const layers = [
    () => config?.get?.(`backends.${backend}.env.${key}`),
    () => process.env[key]
  ];

  for (const read of layers) {
    const value = read();
    if (typeof value === 'string' && value.length > 0) return value;
  }
  return undefined;
}
|
|
55
|
+
|
|
56
|
+
/**
 * Resolve the timeout for proxy model discovery.
 *
 * Precedence: config `timeouts.proxyDiscovery`, then the built-in
 * default, then a hard-coded 5000ms floor. Only positive numbers are
 * accepted from either layer.
 *
 * @param {object|undefined} config - Config instance with `get()`.
 * @returns {number} Timeout in milliseconds.
 */
function proxyTimeout(config) {
  const isPositive = (value) => typeof value === 'number' && value > 0;

  const fromConfig = config?.get?.('timeouts.proxyDiscovery');
  if (isPositive(fromConfig)) return fromConfig;

  const fromDefaults = defaults?.timeouts?.proxyDiscovery;
  return isPositive(fromDefaults) ? fromDefaults : 5000;
}
|
|
69
|
+
|
|
70
|
+
/**
 * Build endpoint candidates for proxy model discovery.
 *
 * Handles base URLs with and without `/v1` to avoid producing
 * invalid `/v1/v1/models` paths.
 *
 * @param {string} baseUrl - Proxy base URL.
 * @returns {string[]} Candidate absolute URLs in priority order.
 */
function endpointCandidates(baseUrl) {
  // Relative URL resolution requires a trailing slash on the base.
  const base = baseUrl.endsWith('/') ? baseUrl : `${baseUrl}/`;
  const trimmedPath = new URL(base).pathname.replace(/\/+$/, '');

  // Choose relative paths depending on whether the base already has /v1.
  const relatives = trimmedPath.endsWith('/v1')
    ? ['models', '../models']
    : ['v1/models', 'models'];

  const urls = relatives.map((rel) => new URL(rel, base).toString());
  return [...new Set(urls)];
}
|
|
96
|
+
|
|
97
|
+
/**
 * Normalize a base URL for cache keying.
 *
 * Strips query string, fragment, and trailing slashes so equivalent
 * proxy URLs map to the same cache entry.
 *
 * @param {string} baseUrl - Base URL.
 * @returns {string} Normalized URL string.
 */
function normalizeBase(baseUrl) {
  const url = new URL(baseUrl);
  url.search = '';
  url.hash = '';

  const trimmed = url.pathname.replace(/\/+$/, '');
  url.pathname = trimmed === '' ? '/' : trimmed;

  return url.toString();
}
|
|
110
|
+
|
|
111
|
+
/**
 * Query one proxy endpoint candidate for a model listing.
 *
 * Returns null on non-2xx status, unparseable JSON, or a payload
 * without a `data` array; network errors and timeouts propagate to
 * the caller.
 *
 * @param {string} endpoint - Candidate endpoint URL.
 * @param {string|undefined} apiKey - Optional bearer token.
 * @param {number} timeout - Request timeout in milliseconds.
 * @returns {Promise<null|object[]>} Raw model objects.
 */
async function requestModels(endpoint, apiKey, timeout) {
  const headers = apiKey ? { Authorization: `Bearer ${apiKey}` } : {};

  const response = await fetch(endpoint, {
    headers,
    signal: AbortSignal.timeout(timeout)
  });
  if (!response.ok) return null;

  let payload;
  try {
    payload = await response.json();
  } catch {
    return null;
  }

  const data = payload?.data;
  return Array.isArray(data) ? data : null;
}
|
|
139
|
+
|
|
140
|
+
/**
 * Convert a proxy `/models` payload into the internal discovery shape.
 *
 * Keeps all model IDs for `validate()`; provider-aware tier mapping
 * is derived from recognizable IDs only.
 *
 * @param {object[]} entries - Raw model entries.
 * @returns {{models: object[], tiers: object}|null} Discovery result, or null when no usable entries exist.
 */
function toDiscovery(entries) {
  const models = entries
    .filter((entry) => entry?.id)
    .map((entry) => {
      const id = String(entry.id);
      return {
        id,
        name: id,
        provider: provider(id),
        tier: tier(id),
        marker: null
      };
    });

  if (models.length === 0) return null;
  return { models, tiers: tiers(models) };
}
|
|
167
|
+
|
|
168
|
+
/**
 * Discover proxy models using endpoint candidates and optional auth.
 *
 * Tries each candidate endpoint in priority order and returns the
 * first one that yields a usable model listing.
 *
 * @param {string} baseUrl - Proxy base URL.
 * @param {string|undefined} apiKey - Optional bearer token.
 * @param {number} timeout - Request timeout in milliseconds.
 * @returns {Promise<{models: object[], tiers: object}|null>} Discovery result.
 */
async function discoverProxy(baseUrl, apiKey, timeout) {
  const endpoints = endpointCandidates(baseUrl);

  for (const endpoint of endpoints) {
    const entries = await requestModels(endpoint, apiKey, timeout);
    if (!entries) continue;

    const result = toDiscovery(entries);
    if (result) return result;
  }

  return null;
}
|
|
185
|
+
|
|
186
|
+
/**
 * Return unique auth key candidates in order.
 *
 * Falls back to a single `undefined` entry so callers always attempt
 * at least one (unauthenticated) request.
 *
 * @param {object|undefined} config - Config instance with `get()`.
 * @param {string} backend - Backend name.
 * @param {string[]} order - Env key names in priority order.
 * @returns {(string|undefined)[]} Ordered key candidates.
 */
function authKeys(config, backend, order) {
  const values = [];

  for (const name of order) {
    const resolved = envValue(config, backend, name);
    if (resolved && !values.includes(resolved)) values.push(resolved);
  }

  return values.length > 0 ? values : [undefined];
}
|
|
203
|
+
|
|
204
|
+
/**
 * Patterns that force a model into a specific tier.
 * Checked in order — first match wins.
 * @type {Array<{pattern: RegExp, tier: string}>}
 */
const TIER_RULES = [
  { pattern: /-(low|mini)\b/, tier: 'fast' },
  { pattern: /(^|-)haiku-/, tier: 'fast' },
  { pattern: /\bflash\b/, tier: 'fast' },
  { pattern: /-(xhigh|max)\b/, tier: 'powerful' },
  { pattern: /(^|-)opus-/, tier: 'powerful' },
  { pattern: /^o3\b/, tier: 'powerful' },
  { pattern: /-high\b/, tier: 'powerful' }
];

/**
 * Classify a model ID into a capability tier.
 *
 * Models matching no rule default to 'balanced'.
 *
 * @param {string} id - Model identifier.
 * @returns {string} Tier name ('fast', 'balanced', or 'powerful').
 */
function tier(id) {
  const matched = TIER_RULES.find((rule) => rule.pattern.test(id));
  return matched ? matched.tier : 'balanced';
}
|
|
231
|
+
|
|
232
|
+
/**
 * Parse the structured output of `cursor-agent --list-models`.
 *
 * Each line is `id - Human Name` with optional `(default)` or `(current)` markers.
 * Only models from supported providers (openai, anthropic) are returned.
 *
 * @param {string} output - Raw stdout from `cursor-agent --list-models`.
 * @returns {Array<{id: string, name: string, provider: string, tier: string, marker: string|null}>}
 */
export function parseCursor(output) {
  // `id - Human Name (default|current)` — marker group is optional.
  const LINE = /^(\S+)\s+-\s+(.+?)(?:\s+\((default|current)\))?\s*$/;
  const models = [];

  for (const line of output.split('\n')) {
    const parts = LINE.exec(line);
    if (!parts) continue;

    const [, id, name, marker] = parts;
    const detected = provider(id);
    if (!detected) continue;

    models.push({
      id,
      name: name.trim(),
      provider: detected,
      tier: tier(id),
      marker: marker ?? null
    });
  }

  return models;
}
|
|
261
|
+
|
|
262
|
+
/**
 * Build a tier map from a flat list of discovered models.
 *
 * Groups models by provider, then for each tier picks the best candidate:
 * models marked as `default` or `current` are preferred, then the
 * lexicographically last ID (usually the latest generation).
 *
 * @param {Array<{id: string, provider: string, tier: string, marker: string|null}>} models
 * @returns {Record<string, Record<string, string>>} `{ fast: { openai: 'gpt-...', anthropic: '...' }, ... }`
 */
export function tiers(models) {
  const byKey = new Map();
  for (const model of models) {
    const key = `${model.provider}:${model.tier}`;
    if (!byKey.has(key)) byKey.set(key, []);
    byKey.get(key).push(model);
  }

  const result = {};
  for (const tierName of TIERS) {
    for (const prov of ['openai', 'anthropic']) {
      const group = byKey.get(`${prov}:${tierName}`);
      if (!group || group.length === 0) continue;

      // Marked (default/current) models win; otherwise take the
      // lexicographically last ID.
      const marked = group.find((m) => m.marker);
      const best = marked ?? [...group].sort((a, b) => b.id.localeCompare(a.id))[0];

      if (!result[tierName]) result[tierName] = {};
      result[tierName][prov] = best.id;
    }
  }
  return result;
}
|
|
295
|
+
|
|
296
|
+
/**
|
|
297
|
+
* Discover available models from cursor-agent CLI.
|
|
298
|
+
*
|
|
299
|
+
* @returns {Promise<{models: object[], tiers: object}|null>} Discovery result, or null on failure.
|
|
300
|
+
*/
|
|
301
|
+
export async function discoverCursor() {
|
|
302
|
+
try {
|
|
303
|
+
const { stdout } = await execFile('cursor-agent', ['--list-models'], {
|
|
304
|
+
timeout: 10000,
|
|
305
|
+
maxBuffer: 64 * 1024
|
|
306
|
+
});
|
|
307
|
+
|
|
308
|
+
const models = parseCursor(stdout);
|
|
309
|
+
if (!models.length) {
|
|
310
|
+
log.warn('cursor-agent --list-models returned no supported models');
|
|
311
|
+
return null;
|
|
312
|
+
}
|
|
313
|
+
|
|
314
|
+
log.info(`cursor: discovered ${models.length} models`);
|
|
315
|
+
return { models, tiers: tiers(models) };
|
|
316
|
+
} catch (err) {
|
|
317
|
+
log.warn(`cursor model discovery failed: ${err.message}`);
|
|
318
|
+
return null;
|
|
319
|
+
}
|
|
320
|
+
}
|
|
321
|
+
|
|
322
|
+
/**
 * Run model discovery for all available backends.
 *
 * Discovery sources:
 * - `cursor`: `cursor-agent --list-models`
 * - `claude` / `codex`: proxy model listing when corresponding
 *   base URLs are configured (ANTHROPIC_BASE_URL / OPENAI_BASE_URL)
 *
 * Backends sharing the same normalized base URL reuse the first
 * successful proxy result instead of re-querying. Results are cached
 * on the registry for use during model resolution.
 *
 * @param {import('./backends/index.js').BackendRegistry} registry - Backend registry with discovered binaries.
 * @param {object} [opts] - Discovery options.
 * @param {object} [opts.config] - Config instance with `get()`.
 * @returns {Promise<Map<string, {models: object[], tiers: object}>>} Per-backend discovery results.
 */
export async function discover(registry, opts = {}) {
  const { config } = opts;
  const results = new Map();
  const proxyByBase = new Map();
  const timeout = proxyTimeout(config);

  if (registry.discovered.has('cursor')) {
    const cursorResult = await discoverCursor();
    if (cursorResult) results.set('cursor', cursorResult);
  }

  for (const { backend, baseKey, keyOrder } of PROXY_BACKENDS) {
    if (!registry.discovered.has(backend)) continue;

    const baseUrl = envValue(config, backend, baseKey);
    if (!baseUrl) continue;

    // Reuse a previous successful result for the same proxy URL.
    const baseId = normalizeBase(baseUrl);
    if (proxyByBase.has(baseId)) {
      results.set(backend, proxyByBase.get(baseId));
      continue;
    }

    let discovery = null;
    for (const apiKey of authKeys(config, backend, keyOrder)) {
      try {
        discovery = await discoverProxy(baseUrl, apiKey, timeout);
      } catch (err) {
        log.warn(`${backend} proxy discovery failed: ${err.message}`);
      }
      if (discovery) break;
    }

    if (discovery) {
      results.set(backend, discovery);
      proxyByBase.set(baseId, discovery);
      log.info(`${backend}: discovered ${discovery.models.length} models via ${baseKey}`);
    }
  }

  return results;
}
|
|
383
|
+
|
|
384
|
+
/**
|
|
385
|
+
* Check if a resolved model ID exists in the discovered model list.
|
|
386
|
+
*
|
|
387
|
+
* @param {string} model - Resolved model identifier.
|
|
388
|
+
* @param {string} backend - Backend name.
|
|
389
|
+
* @param {Map<string, {models: object[]}>} discovered - Discovery cache.
|
|
390
|
+
* @returns {boolean} True if the model is known or discovery is unavailable.
|
|
391
|
+
*/
|
|
392
|
+
export function validate(model, backend, discovered) {
|
|
393
|
+
const entry = discovered?.get(backend);
|
|
394
|
+
if (!entry?.models?.length) return true;
|
|
395
|
+
return entry.models.some(function known(m) { return m.id === model; });
|
|
396
|
+
}
|
|
@@ -74,46 +74,51 @@ export class Factory {
|
|
|
74
74
|
// would have an empty model while add_agent identities had the
|
|
75
75
|
// resolved name — causing "unknown" in reviewer stamps.
|
|
76
76
|
const config = opts.config ?? this.config;
|
|
77
|
-
const resolved = resolveModel(model, backendName, vendor(provider), config);
|
|
77
|
+
const resolved = resolveModel(model, backendName, vendor(provider), config, this.backends.models);
|
|
78
78
|
|
|
79
79
|
const identity = this.identities.acquire(theme, provider, resolved, opts.taken);
|
|
80
80
|
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
opts.context
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
opts.context.denied
|
|
81
|
+
try {
|
|
82
|
+
// Update context.agent before prepare() and backend construction
|
|
83
|
+
// so the Codex -c flags and .mcp.json env vars have the real name
|
|
84
|
+
// instead of null. Context is mutated in-place intentionally —
|
|
85
|
+
// callers (e.g. enlist) pass the same reference and read it back.
|
|
86
|
+
if (opts.context) {
|
|
87
|
+
opts.context.agent = identity.name;
|
|
88
|
+
|
|
89
|
+
if (!opts.context.denied) {
|
|
90
|
+
opts.context.denied = config?.get?.('agents.disallowedTools') ?? [];
|
|
91
|
+
}
|
|
90
92
|
}
|
|
91
|
-
}
|
|
92
93
|
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
94
|
+
let cwd = opts.cwd ?? pathFor(identity.name);
|
|
95
|
+
|
|
96
|
+
// Collect scaffold descriptors from all registered backends so
|
|
97
|
+
// workspace.prepare() and workspace.create() write configs, hooks,
|
|
98
|
+
// and files generically — no backend-specific knowledge needed.
|
|
99
|
+
const descriptors = this.backends.scaffoldAll(opts.context);
|
|
100
|
+
|
|
101
|
+
// When we have full orchestration context (repo + home), create a
|
|
102
|
+
// git worktree instead of an empty directory. This gives the agent
|
|
103
|
+
// a fully checked-out repo with zero network overhead per agent —
|
|
104
|
+
// all from a shared bare mirror.
|
|
105
|
+
if (opts.context?.repo && opts.context?.home && !opts.cwd) {
|
|
106
|
+
const url = `https://github.com/${opts.context.repo}.git`;
|
|
107
|
+
const bare = await mirror(url, { home: opts.context.home, token: opts.context.token });
|
|
108
|
+
cwd = await createWorktree(bare, 'HEAD', identity.name, undefined, opts.context, descriptors);
|
|
109
|
+
log.info(`worktree ready at ${cwd} from mirror of ${opts.context.repo}`);
|
|
110
|
+
} else {
|
|
111
|
+
await prepare(cwd, opts.context, descriptors);
|
|
112
|
+
}
|
|
112
113
|
|
|
113
|
-
|
|
114
|
-
|
|
114
|
+
const agentOpts = { identity, role, cwd, model, config };
|
|
115
|
+
if (opts.context) agentOpts.context = opts.context;
|
|
115
116
|
|
|
116
|
-
|
|
117
|
-
|
|
117
|
+
log.info(`created ${identity.name} (${role}) via ${backendName}`);
|
|
118
|
+
return this.backends.create(backendName, agentOpts);
|
|
119
|
+
} catch (err) {
|
|
120
|
+
this.identities.release(identity);
|
|
121
|
+
throw err;
|
|
122
|
+
}
|
|
118
123
|
}
|
|
119
124
|
}
|
|
@@ -1,27 +1,45 @@
|
|
|
1
1
|
import { defaults } from 'loreli/config';
|
|
2
|
+
import { validate as validateModel } from './discover.js';
|
|
3
|
+
import { logger } from 'loreli/log';
|
|
4
|
+
|
|
5
|
+
const log = logger('models');
|
|
2
6
|
|
|
3
7
|
/**
|
|
4
8
|
* Resolve a model alias to a concrete model identifier.
|
|
5
9
|
*
|
|
6
10
|
* Resolution order:
|
|
7
11
|
* 1. Config layer: `config.get('backends.{backend}.models.{alias}.{provider}')`
|
|
8
|
-
* 2.
|
|
9
|
-
* 3.
|
|
12
|
+
* 2. Discovery layer: runtime-discovered models classified into tiers
|
|
13
|
+
* 3. Built-in defaults: `defaults.backends[backend].models[alias][provider]`
|
|
14
|
+
* 4. Pass-through: return the alias string unchanged (exact model IDs)
|
|
15
|
+
*
|
|
16
|
+
* When discovery data is available, the resolved model is validated
|
|
17
|
+
* against the known model list. Invalid models trigger a warning and
|
|
18
|
+
* fall back to the backend's default discovered model.
|
|
10
19
|
*
|
|
11
20
|
* @param {string} alias - Alias ('fast', 'balanced', 'powerful') or exact model string.
|
|
12
21
|
* @param {string} backend - Backend name ('claude', 'codex', 'cursor').
|
|
13
22
|
* @param {string} provider - AI provider ('openai' | 'anthropic').
|
|
14
23
|
* @param {object} [config] - Config instance with a `get()` method.
|
|
24
|
+
* @param {Map} [discovered] - Discovery cache from BackendRegistry.
|
|
15
25
|
* @returns {string} Resolved model identifier.
|
|
16
26
|
*/
|
|
17
|
-
export function resolve(alias, backend, provider, config) {
|
|
18
|
-
// Config layer: highest priority
|
|
27
|
+
export function resolve(alias, backend, provider, config, discovered) {
|
|
19
28
|
const fromConfig = config?.get?.(`backends.${backend}.models.${alias}.${provider}`);
|
|
20
29
|
if (fromConfig) return fromConfig;
|
|
21
30
|
|
|
22
|
-
|
|
31
|
+
const fromDiscovery = discovered?.get(backend)?.tiers?.[alias]?.[provider];
|
|
32
|
+
if (fromDiscovery) return fromDiscovery;
|
|
33
|
+
|
|
23
34
|
const fallback = defaults.backends?.[backend]?.models?.[alias]?.[provider];
|
|
24
|
-
if (fallback)
|
|
35
|
+
if (fallback) {
|
|
36
|
+
if (discovered && !validateModel(fallback, backend, discovered)) {
|
|
37
|
+
log.warn(`model "${fallback}" (${alias}/${backend}/${provider}) not found in discovered models — using backend default`);
|
|
38
|
+
const def = discovered.get(backend)?.models?.find(function isDefault(m) { return m.marker === 'default'; });
|
|
39
|
+
if (def) return def.id;
|
|
40
|
+
}
|
|
41
|
+
return fallback;
|
|
42
|
+
}
|
|
25
43
|
|
|
26
44
|
// Pass-through for exact model strings
|
|
27
45
|
return alias;
|
|
@@ -0,0 +1,136 @@
|
|
|
1
|
+
# loreli/classify
|
|
2
|
+
|
|
3
|
+
Prompt-driven LLM classification. Loads a named Mustache template from disk, renders it with the provided content and variables, sends the result through `backends.oneshot()`, and returns the parsed JSON response. The prompt template defines the response shape — classify is generic plumbing.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
Part of the Loreli monorepo. Import via the package exports map:
|
|
8
|
+
|
|
9
|
+
```js
|
|
10
|
+
import { classify } from 'loreli/classify';
|
|
11
|
+
```
|
|
12
|
+
|
|
13
|
+
## Quick Start
|
|
14
|
+
|
|
15
|
+
Classification requires a `BackendRegistry` with at least one available LLM backend and a prompt template in `packages/classify/prompts/`:
|
|
16
|
+
|
|
17
|
+
```js
|
|
18
|
+
import { classify } from 'loreli/classify';
|
|
19
|
+
|
|
20
|
+
const result = await classify('pane-state', paneOutput, {
|
|
21
|
+
backends: backendRegistry
|
|
22
|
+
});
|
|
23
|
+
// => { category: 'option_dialog', reasoning: 'Trust dialog detected', confidence: 0.9 }
|
|
24
|
+
```
|
|
25
|
+
|
|
26
|
+
The first argument is the template name — it resolves to `prompts/<name>.md` inside the package. The second argument is the text to classify. The template defines what categories exist, what JSON shape to return, and how the LLM should reason about the input.
|
|
27
|
+
|
|
28
|
+
## How It Works
|
|
29
|
+
|
|
30
|
+
```text
|
|
31
|
+
classify('pane-state', text, opts)
|
|
32
|
+
│
|
|
33
|
+
├─ Load prompts/pane-state.md
|
|
34
|
+
├─ Mustache.render(template, { content: text, ...opts.vars })
|
|
35
|
+
├─ backends.oneshot(rendered, { model, timeout })
|
|
36
|
+
└─ Parse JSON from LLM response → return object
|
|
37
|
+
```
|
|
38
|
+
|
|
39
|
+
1. **Load** — Reads `prompts/<name>.md` from disk.
|
|
40
|
+
2. **Render** — Runs Mustache templating. The content is available as `{{{content}}}` (triple-stache, unescaped). Extra variables via `opts.vars` are also available.
|
|
41
|
+
3. **Send** — Calls `backends.oneshot()` with the rendered prompt.
|
|
42
|
+
4. **Parse** — Extracts the first `{...}` JSON object from the response, handling markdown fences and preamble text.
|
|
43
|
+
|
|
44
|
+
## Prompt Templates
|
|
45
|
+
|
|
46
|
+
Templates are Markdown files in `packages/classify/prompts/`. Each template contains the full classification instructions for the LLM, including category definitions and the expected JSON output shape.
|
|
47
|
+
|
|
48
|
+
### `pane-state.md` — orchestrator stall detection
|
|
49
|
+
|
|
50
|
+
Used by the orchestrator's monitor loop and rapid-death detector to diagnose agent state. Returns `{category, reasoning, confidence}` with states: `working`, `waiting_for_input`, `option_dialog`, `error_loop`, `idle`, `fatal`, `dead`. The `dead` state identifies agents whose process exited or crashed — the orchestrator uses it in the rapid-death window (first 15s after spawn) to classify *why* an agent died, replacing the old binary alive/dead check with diagnostic context.
|
|
51
|
+
|
|
52
|
+
### `feedback.md` — knowledge feedback categorization
|
|
53
|
+
|
|
54
|
+
Used by `loreli/knowledge` to classify review feedback. Returns `{category, reasoning, confidence}` with categories: `naming`, `architecture`, `testing`, `documentation`, `performance`, `security`.
|
|
55
|
+
|
|
56
|
+
### `blocker.md` — knowledge per-ref blocker detection
|
|
57
|
+
|
|
58
|
+
Used by `loreli/knowledge` to classify issue/PR references as blockers or informational. Receives `{{{content}}}` (joined discussion text) and `{{refs}}` (formatted ref list). Returns `{blockers: [number], references: [number]}`.
|
|
59
|
+
|
|
60
|
+
### Writing a new template
|
|
61
|
+
|
|
62
|
+
Create a new `.md` file in `prompts/`. Use `{{{content}}}` for the text to classify (triple-stache prevents HTML escaping). Use `{{varName}}` for additional variables passed via `opts.vars`. End the template with the expected JSON shape so the LLM knows what to return.
|
|
63
|
+
|
|
64
|
+
The following example shows how a severity template might look:
|
|
65
|
+
|
|
66
|
+
```markdown
|
|
67
|
+
Classify this error message by severity.
|
|
68
|
+
|
|
69
|
+
Levels:
|
|
70
|
+
- critical: System is down or data loss is occurring
|
|
71
|
+
- warning: Something is wrong but the system is functional
|
|
72
|
+
- info: Normal operational message
|
|
73
|
+
|
|
74
|
+
Respond with ONLY a JSON object.
|
|
75
|
+
{"level": "<severity>", "reasoning": "<one sentence>"}
|
|
76
|
+
|
|
77
|
+
{{{content}}}
|
|
78
|
+
```
|
|
79
|
+
|
|
80
|
+
Then call it:
|
|
81
|
+
|
|
82
|
+
```js
|
|
83
|
+
const result = await classify('severity', errorMessage, { backends });
|
|
84
|
+
// => { level: 'critical', reasoning: 'Database connection lost' }
|
|
85
|
+
```
|
|
86
|
+
|
|
87
|
+
## API Reference
|
|
88
|
+
|
|
89
|
+
### `classify(name, content, opts)`
|
|
90
|
+
|
|
91
|
+
Run a named classification prompt against content via LLM.
|
|
92
|
+
|
|
93
|
+
**Parameters:**
|
|
94
|
+
|
|
95
|
+
| Name | Type | Default | Description |
|
|
96
|
+
|------|------|---------|-------------|
|
|
97
|
+
| `name` | `string` | — | Template name — resolves to `prompts/<name>.md`. |
|
|
98
|
+
| `content` | `string` | — | Text to classify — injected as `{{{content}}}`. |
|
|
99
|
+
| `opts.backends` | `BackendRegistry` | — | **Required.** Backend registry with `oneshot()` method. |
|
|
100
|
+
| `opts.config` | `Config` | `undefined` | Config instance for model/timeout resolution. |
|
|
101
|
+
| `opts.model` | `string` | `'fast'` | Model alias override. Falls back to `config.classify.model`, then `'fast'`. |
|
|
102
|
+
| `opts.timeout` | `number` | `30000` | Timeout in ms. Falls back to `config.classify.timeout`, then `30000`. |
|
|
103
|
+
| `opts.vars` | `object` | `{}` | Extra Mustache variables beyond `content`. |
|
|
104
|
+
|
|
105
|
+
**Returns:** `Promise<object>` — Parsed JSON from the LLM. Shape is defined by the prompt template, not enforced by classify.
|
|
106
|
+
|
|
107
|
+
**Throws:**
|
|
108
|
+
|
|
109
|
+
| Error | Cause |
|
|
110
|
+
|-------|-------|
|
|
111
|
+
| `classify() requires a backends instance` | `opts.backends` is missing or falsy. |
|
|
112
|
+
| `ENOENT` | Template file `prompts/<name>.md` does not exist. |
|
|
113
|
+
| LLM error (propagated) | `backends.oneshot()` threw (timeout, network, etc.). |
|
|
114
|
+
| `classify: LLM response contains no JSON object` | Response had no `{...}` block. |
|
|
115
|
+
| `classify: failed to parse JSON from LLM response` | Found `{...}` but it was not valid JSON. |
|
|
116
|
+
|
|
117
|
+
## Configuration
|
|
118
|
+
|
|
119
|
+
The classify package reads configuration from the `classify` section in `loreli.yml`:
|
|
120
|
+
|
|
121
|
+
```yaml
|
|
122
|
+
classify:
|
|
123
|
+
model: fast # Model alias — resolves via backends
|
|
124
|
+
maxLines: 100 # Lines of pane output to capture (used by orchestrator)
|
|
125
|
+
timeout: 30s # Timeout for the oneshot CLI call
|
|
126
|
+
maxRetries: 5 # Consecutive failures before safety-net kill
|
|
127
|
+
```
|
|
128
|
+
|
|
129
|
+
## Error Reference
|
|
130
|
+
|
|
131
|
+
| Error | Cause | Resolution |
|
|
132
|
+
|-------|-------|------------|
|
|
133
|
+
| `classify() requires a backends instance` | No `backends` option provided. | Pass a `BackendRegistry` instance in `opts.backends`. |
|
|
134
|
+
| Template `ENOENT` | Named template does not exist in `prompts/`. | Create the template file or check the name for typos. |
|
|
135
|
+
| `LLM response contains no JSON object` | The LLM returned prose without any JSON. | Check the template instructions — they should explicitly ask for JSON-only output. |
|
|
136
|
+
| `failed to parse JSON from LLM response` | JSON was found but was malformed. | Usually a transient LLM issue. Retry or use a more capable model. |
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
Analyze this discussion text and classify each referenced issue/PR number as either a blocking dependency or an informational reference.
|
|
2
|
+
|
|
3
|
+
Referenced issues: {{refs}}
|
|
4
|
+
|
|
5
|
+
A reference is a **blocker** if the text indicates it must be resolved, merged, or closed before this work can proceed (e.g., "blocked by", "depends on", "needs #N merged first", "waiting for #N").
|
|
6
|
+
|
|
7
|
+
A reference is **informational** if it is mentioned for context, background, or related reading (e.g., "see #N for context", "related to #N", "similar to #N").
|
|
8
|
+
|
|
9
|
+
Respond with ONLY a JSON object. Do not wrap in markdown. Do not add any other text.
|
|
10
|
+
{"blockers": [<number>, ...], "references": [<number>, ...]}
|
|
11
|
+
|
|
12
|
+
{{{content}}}
|