loreli 0.0.0 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +1 -1
- package/README.md +710 -97
- package/bin/loreli.js +89 -0
- package/package.json +77 -14
- package/packages/README.md +101 -0
- package/packages/action/README.md +98 -0
- package/packages/action/prompts/action.md +172 -0
- package/packages/action/src/index.js +684 -0
- package/packages/agent/README.md +606 -0
- package/packages/agent/src/backends/claude.js +387 -0
- package/packages/agent/src/backends/codex.js +351 -0
- package/packages/agent/src/backends/cursor.js +371 -0
- package/packages/agent/src/backends/index.js +486 -0
- package/packages/agent/src/base.js +138 -0
- package/packages/agent/src/cli.js +275 -0
- package/packages/agent/src/discover.js +396 -0
- package/packages/agent/src/factory.js +124 -0
- package/packages/agent/src/index.js +12 -0
- package/packages/agent/src/models.js +159 -0
- package/packages/agent/src/output.js +62 -0
- package/packages/agent/src/session.js +162 -0
- package/packages/agent/src/trace.js +186 -0
- package/packages/classify/README.md +136 -0
- package/packages/classify/prompts/blocker.md +12 -0
- package/packages/classify/prompts/feedback.md +14 -0
- package/packages/classify/prompts/pane-state.md +20 -0
- package/packages/classify/src/index.js +81 -0
- package/packages/config/README.md +898 -0
- package/packages/config/src/defaults.js +145 -0
- package/packages/config/src/index.js +223 -0
- package/packages/config/src/schema.js +291 -0
- package/packages/config/src/validate.js +160 -0
- package/packages/context/README.md +165 -0
- package/packages/context/src/index.js +198 -0
- package/packages/hub/README.md +338 -0
- package/packages/hub/src/base.js +154 -0
- package/packages/hub/src/github.js +1597 -0
- package/packages/hub/src/index.js +79 -0
- package/packages/hub/src/labels.js +48 -0
- package/packages/identity/README.md +288 -0
- package/packages/identity/src/index.js +620 -0
- package/packages/identity/src/themes/avatar.js +217 -0
- package/packages/identity/src/themes/digimon.js +217 -0
- package/packages/identity/src/themes/dragonball.js +217 -0
- package/packages/identity/src/themes/lotr.js +217 -0
- package/packages/identity/src/themes/marvel.js +217 -0
- package/packages/identity/src/themes/pokemon.js +217 -0
- package/packages/identity/src/themes/starwars.js +217 -0
- package/packages/identity/src/themes/transformers.js +217 -0
- package/packages/identity/src/themes/zelda.js +217 -0
- package/packages/knowledge/README.md +217 -0
- package/packages/knowledge/src/index.js +243 -0
- package/packages/log/README.md +93 -0
- package/packages/log/src/index.js +252 -0
- package/packages/marker/README.md +200 -0
- package/packages/marker/src/index.js +184 -0
- package/packages/mcp/README.md +323 -0
- package/packages/mcp/instructions.md +126 -0
- package/packages/mcp/scaffolding/.agents/skills/loreli-context/SKILL.md +89 -0
- package/packages/mcp/scaffolding/ISSUE_TEMPLATE/config.yml +2 -0
- package/packages/mcp/scaffolding/ISSUE_TEMPLATE/loreli.yml +83 -0
- package/packages/mcp/scaffolding/loreli.yml +491 -0
- package/packages/mcp/scaffolding/mcp-configs/.codex/config.toml +4 -0
- package/packages/mcp/scaffolding/mcp-configs/.cursor/mcp.json +14 -0
- package/packages/mcp/scaffolding/mcp-configs/.mcp.json +14 -0
- package/packages/mcp/scaffolding/pull-request.md +23 -0
- package/packages/mcp/src/index.js +600 -0
- package/packages/mcp/src/tools/agent-context.js +44 -0
- package/packages/mcp/src/tools/agents.js +450 -0
- package/packages/mcp/src/tools/context.js +200 -0
- package/packages/mcp/src/tools/github.js +1163 -0
- package/packages/mcp/src/tools/hitl.js +162 -0
- package/packages/mcp/src/tools/index.js +18 -0
- package/packages/mcp/src/tools/refactor.js +227 -0
- package/packages/mcp/src/tools/repo.js +44 -0
- package/packages/mcp/src/tools/start.js +904 -0
- package/packages/mcp/src/tools/status.js +149 -0
- package/packages/mcp/src/tools/work.js +134 -0
- package/packages/orchestrator/README.md +192 -0
- package/packages/orchestrator/src/index.js +1492 -0
- package/packages/planner/README.md +251 -0
- package/packages/planner/prompts/plan-reviewer.md +109 -0
- package/packages/planner/prompts/planner.md +191 -0
- package/packages/planner/prompts/tiebreaker-reviewer.md +71 -0
- package/packages/planner/src/index.js +1381 -0
- package/packages/review/README.md +129 -0
- package/packages/review/prompts/reviewer.md +158 -0
- package/packages/review/src/index.js +1403 -0
- package/packages/risk/README.md +178 -0
- package/packages/risk/prompts/risk.md +272 -0
- package/packages/risk/src/index.js +439 -0
- package/packages/session/README.md +165 -0
- package/packages/session/src/index.js +215 -0
- package/packages/test-utils/README.md +96 -0
- package/packages/test-utils/src/index.js +354 -0
- package/packages/tmux/README.md +261 -0
- package/packages/tmux/src/index.js +501 -0
- package/packages/workflow/README.md +317 -0
- package/packages/workflow/prompts/preamble.md +14 -0
- package/packages/workflow/src/index.js +660 -0
- package/packages/workflow/src/proof-of-life.js +74 -0
- package/packages/workspace/README.md +143 -0
- package/packages/workspace/src/index.js +1127 -0
- package/index.js +0 -8
|
@@ -0,0 +1,606 @@
|
|
|
1
|
+
# loreli/agent
|
|
2
|
+
|
|
3
|
+
Agent lifecycle management with pluggable backends, session persistence, and role-based prompt templating.
|
|
4
|
+
|
|
5
|
+
## API Reference
|
|
6
|
+
|
|
7
|
+
### Agent (Base Class)
|
|
8
|
+
|
|
9
|
+
Abstract base class for all backends. Extends `EventEmitter`.
|
|
10
|
+
|
|
11
|
+
```js
|
|
12
|
+
import { Agent } from 'loreli/agent';
|
|
13
|
+
|
|
14
|
+
const agent = new Agent({ identity, role: 'action', cwd: '/path/to/repo' });
|
|
15
|
+
agent.state; // 'idle' | 'spawned' | 'working' | 'standby' | 'reviewing' | 'awaiting_hitl' | 'dormant'
|
|
16
|
+
agent.canTransition('spawned'); // true — check before transitioning
|
|
17
|
+
await agent.spawn(); // Start the agent
|
|
18
|
+
await agent.send(msg); // Deliver work
|
|
19
|
+
await agent.capture(); // Read latest output (default 500 lines)
|
|
20
|
+
await agent.capture(40); // Read last 40 lines
|
|
21
|
+
await agent.stop(); // Graceful shutdown
|
|
22
|
+
```
|
|
23
|
+
|
|
24
|
+
#### State Machine
|
|
25
|
+
|
|
26
|
+
Agent state transitions are validated. Invalid transitions throw an error. The `dormant` state is terminal — a dormant agent cannot be reactivated without a fresh spawn.
|
|
27
|
+
|
|
28
|
+
```mermaid
|
|
29
|
+
stateDiagram-v2
|
|
30
|
+
[*] --> idle
|
|
31
|
+
idle --> spawned
|
|
32
|
+
idle --> dormant
|
|
33
|
+
spawned --> working
|
|
34
|
+
spawned --> standby
|
|
35
|
+
spawned --> dormant
|
|
36
|
+
working --> standby
|
|
37
|
+
working --> reviewing
|
|
38
|
+
working --> awaiting_hitl
|
|
39
|
+
working --> dormant
|
|
40
|
+
standby --> working
|
|
41
|
+
standby --> reviewing
|
|
42
|
+
standby --> awaiting_hitl
|
|
43
|
+
standby --> dormant
|
|
44
|
+
reviewing --> working
|
|
45
|
+
reviewing --> standby
|
|
46
|
+
reviewing --> awaiting_hitl
|
|
47
|
+
reviewing --> dormant
|
|
48
|
+
awaiting_hitl --> working
|
|
49
|
+
awaiting_hitl --> standby
|
|
50
|
+
awaiting_hitl --> dormant
|
|
51
|
+
dormant --> [*]
|
|
52
|
+
```
|
|
53
|
+
|
|
54
|
+
### Session State Machine
|
|
55
|
+
|
|
56
|
+
Sessions track the same states as agents (minus `idle`) with the same validated transitions:
|
|
57
|
+
|
|
58
|
+
```js
|
|
59
|
+
import { Session, STATES, TRANSITIONS } from 'loreli/agent';
|
|
60
|
+
|
|
61
|
+
const s = new Session({ identity, role: 'action', backend: 'claude' });
|
|
62
|
+
s.state; // 'spawned'
|
|
63
|
+
s.canTransition('working'); // true
|
|
64
|
+
s.transition('working'); // valid
|
|
65
|
+
s.transition('dormant'); // valid (terminal)
|
|
66
|
+
s.transition('working'); // throws: Invalid transition: "dormant" -> "working"
|
|
67
|
+
```
|
|
68
|
+
|
|
69
|
+
### CliAgent
|
|
70
|
+
|
|
71
|
+
Tmux-managed CLI agent. Each agent gets its own window in the `loreli` tmux session.
|
|
72
|
+
|
|
73
|
+
All backends use a **launcher script** pattern for spawn: a `/bin/sh` script is written to the agent's cwd and executed directly via `tmux new-window`, bypassing the user's login shell (`.zshrc`, etc.) and its initialization prompts.
|
|
74
|
+
|
|
75
|
+
```js
|
|
76
|
+
import { CliAgent } from 'loreli/agent';
|
|
77
|
+
|
|
78
|
+
const agent = new CliAgent({
|
|
79
|
+
identity, role: 'action', cwd: '/path/to/repo',
|
|
80
|
+
command: 'claude --dangerously-skip-permissions --model claude-sonnet-4-20250514'
|
|
81
|
+
});
|
|
82
|
+
|
|
83
|
+
await agent.spawn(); // Writes launcher script, creates tmux window
|
|
84
|
+
await agent.send(msg); // tmux send-keys (single-line) or file-based (multi-line)
|
|
85
|
+
await agent.capture(n); // tmux capture-pane (optional line count, defaults to 500)
|
|
86
|
+
await agent.alive(); // tmux pane alive check
|
|
87
|
+
await agent.stop(); // kill pane
|
|
88
|
+
```
|
|
89
|
+
|
|
90
|
+
### Backend Hierarchy
|
|
91
|
+
|
|
92
|
+
```mermaid
|
|
93
|
+
graph TD
|
|
94
|
+
Agent["Agent (abstract, EventEmitter)"]
|
|
95
|
+
CliAgent["CliAgent (tmux-managed)"]
|
|
96
|
+
Claude["ClaudeBackend (interactive)"]
|
|
97
|
+
Cursor["CursorBackend (interactive, multi-provider)"]
|
|
98
|
+
Codex["CodexBackend (interactive)"]
|
|
99
|
+
|
|
100
|
+
Agent --> CliAgent
|
|
101
|
+
CliAgent --> Claude
|
|
102
|
+
CliAgent --> Cursor
|
|
103
|
+
CliAgent --> Codex
|
|
104
|
+
```
|
|
105
|
+
|
|
106
|
+
### Backends
|
|
107
|
+
|
|
108
|
+
#### ClaudeBackend
|
|
109
|
+
|
|
110
|
+
Interactive backend using the `claude` CLI. Stays running in a tmux pane. Provider: `anthropic`.
|
|
111
|
+
|
|
112
|
+
Uses `--dangerously-skip-permissions` to bypass all startup dialogs (workspace trust, permission bypass). Uses `--mcp-config` to load the scaffolded `.mcp.json` that connects the agent back to Loreli. Prompts are delivered via `send()` (not `--prompt`) to avoid shell injection.
|
|
113
|
+
|
|
114
|
+
```js
|
|
115
|
+
import { ClaudeBackend } from 'loreli/agent';
|
|
116
|
+
|
|
117
|
+
const agent = new ClaudeBackend({
|
|
118
|
+
identity, role: 'action', cwd: '/path/to/repo',
|
|
119
|
+
model: 'balanced', // resolves via config, defaults to claude-sonnet-4-5-20250929
|
|
120
|
+
config // optional Config instance for model resolution
|
|
121
|
+
});
|
|
122
|
+
// command: claude --dangerously-skip-permissions --model claude-sonnet-4-5-20250929 --mcp-config /path/to/repo/.mcp.json
|
|
123
|
+
```
|
|
124
|
+
|
|
125
|
+
#### CursorBackend
|
|
126
|
+
|
|
127
|
+
Interactive backend using the `cursor-agent` CLI. Multi-provider — runs models from Anthropic, OpenAI, Google, and others via a single binary. This makes it the natural fallback when provider-specific CLIs (`claude`, `codex`) are unavailable or their API endpoints are unreachable (e.g. behind a VPN-dependent proxy).
|
|
128
|
+
|
|
129
|
+
The yin/yang adversarial pairing is preserved because each agent's identity carries its provider. Model aliases resolve directly from config via `backends.cursor.models.{tier}.{provider}` — no translation table. Unknown model names are passed through directly, so cursor-specific names like `gemini-3-pro` work out of the box.
|
|
130
|
+
|
|
131
|
+
The command includes `--force` (auto-approve tool usage), `--sandbox disabled` (no sandbox prompts), and `--approve-mcps` (auto-approve the scaffolded Loreli MCP server). Multi-line prompts are written to a temporary Markdown file and delivered via a single-line reference, matching the `ClaudeBackend` pattern.
|
|
132
|
+
|
|
133
|
+
```js
|
|
134
|
+
import { CursorBackend } from 'loreli/agent';
|
|
135
|
+
|
|
136
|
+
// Anthropic-side agent — resolves balanced to sonnet-4.5-thinking from config
|
|
137
|
+
const action = new CursorBackend({
|
|
138
|
+
identity: anthropicIdentity, role: 'action', cwd: '/path/to/repo',
|
|
139
|
+
model: 'balanced'
|
|
140
|
+
});
|
|
141
|
+
// command: cursor-agent --model sonnet-4.5-thinking --force --sandbox disabled --approve-mcps --workspace /path/to/repo
|
|
142
|
+
|
|
143
|
+
// OpenAI-side agent — resolves balanced to gpt-5.3-codex from config
|
|
144
|
+
const reviewer = new CursorBackend({
|
|
145
|
+
identity: openaiIdentity, role: 'reviewer', cwd: '/path/to/repo',
|
|
146
|
+
model: 'balanced'
|
|
147
|
+
});
|
|
148
|
+
// command: cursor-agent --model gpt-5.3-codex --force --sandbox disabled --approve-mcps --workspace /path/to/repo
|
|
149
|
+
```
|
|
150
|
+
|
|
151
|
+
#### CodexBackend
|
|
152
|
+
|
|
153
|
+
Interactive backend using the `codex` CLI. Stays running in a tmux pane. Provider: `openai`.
|
|
154
|
+
|
|
155
|
+
Uses `-a never` (disable approval prompts), `-s workspace-write` (sandboxed write access), and `--no-alt-screen` (inline TUI mode for tmux capture compatibility). MCP servers are injected via `-c` flags because Codex only reads `~/.codex/config.toml` (global), not local config. When token context is present, Codex forwards `GITHUB_TOKEN` via `mcp_servers.loreli.env_vars` so no literal token appears in command flags. Multi-line prompts are written to a Markdown file and delivered via a single-line reference, matching the `ClaudeBackend` pattern.
|
|
156
|
+
|
|
157
|
+
```js
|
|
158
|
+
import { CodexBackend } from 'loreli/agent';
|
|
159
|
+
|
|
160
|
+
const agent = new CodexBackend({
|
|
161
|
+
identity, role: 'action', cwd: '/path/to/repo',
|
|
162
|
+
model: 'fast', // resolves via config, defaults to gpt-5-mini
|
|
163
|
+
config // optional Config instance for model resolution
|
|
164
|
+
});
|
|
165
|
+
// command: codex --model gpt-5-mini -a never -s workspace-write --no-alt-screen -C /path/to/repo
|
|
166
|
+
```
|
|
167
|
+
|
|
168
|
+
### Session
|
|
169
|
+
|
|
170
|
+
Tracks runtime state. Persisted to disk for resilience.
|
|
171
|
+
|
|
172
|
+
```js
|
|
173
|
+
import { Session } from 'loreli/agent';
|
|
174
|
+
|
|
175
|
+
const session = new Session({
|
|
176
|
+
identity: { name: 'optimus-0', provider: 'openai' },
|
|
177
|
+
role: 'action', backend: 'claude', paneId: '%3'
|
|
178
|
+
});
|
|
179
|
+
|
|
180
|
+
session.transition('working');
|
|
181
|
+
session.toJSON();
|
|
182
|
+
await session.save('/path/to/file.json');
|
|
183
|
+
```
|
|
184
|
+
|
|
185
|
+
**States**: `spawned` -> `working` -> `standby` -> `reviewing` -> `awaiting_hitl` -> `dormant`
|
|
186
|
+
|
|
187
|
+
#### Session State Machine
|
|
188
|
+
|
|
189
|
+
```mermaid
|
|
190
|
+
stateDiagram-v2
|
|
191
|
+
[*] --> spawned
|
|
192
|
+
spawned --> working
|
|
193
|
+
working --> standby
|
|
194
|
+
working --> reviewing
|
|
195
|
+
standby --> working
|
|
196
|
+
reviewing --> working: feedback
|
|
197
|
+
reviewing --> awaiting_hitl: HITL
|
|
198
|
+
awaiting_hitl --> working: rework
|
|
199
|
+
awaiting_hitl --> dormant: human merges
|
|
200
|
+
working --> dormant
|
|
201
|
+
reviewing --> dormant
|
|
202
|
+
```
|
|
203
|
+
|
|
204
|
+
#### HITL Fields
|
|
205
|
+
|
|
206
|
+
When HITL (human in the loop) is active, sessions track additional state:
|
|
207
|
+
|
|
208
|
+
| Field | Type | Default | Description |
|
|
209
|
+
|-------|------|---------|-------------|
|
|
210
|
+
| `reviewers` | `string[]` | `[]` | GitHub usernames assigned as human reviewers |
|
|
211
|
+
| `agentApprovals` | `Array<{name, provider, timestamp}>` | `[]` | Agent approval records |
|
|
212
|
+
| `hitlAt` | `string\|null` | `null` | ISO timestamp when HITL was activated |
|
|
213
|
+
|
|
214
|
+
### BackendRegistry
|
|
215
|
+
|
|
216
|
+
Discovers available backends at startup by checking which CLI binaries exist on PATH.
|
|
217
|
+
|
|
218
|
+
```js
|
|
219
|
+
import { BackendRegistry } from 'loreli/agent';
|
|
220
|
+
|
|
221
|
+
const registry = new BackendRegistry();
|
|
222
|
+
await registry.discover();
|
|
223
|
+
|
|
224
|
+
registry.available(); // [{ name, provider, binary }]
|
|
225
|
+
registry.providers(); // ['anthropic', 'openai', 'cursor-openai', 'cursor-anthropic']
|
|
226
|
+
registry.has('cursor'); // true if cursor-agent is installed
|
|
227
|
+
registry.has('claude'); // true if claude is installed
|
|
228
|
+
|
|
229
|
+
// Dynamic registration
|
|
230
|
+
registry.register('custom', CustomBackend, { provider: 'custom' });
|
|
231
|
+
```
|
|
232
|
+
|
|
233
|
+
The registry auto-detects these built-in backends:
|
|
234
|
+
|
|
235
|
+
| Name | Binary | Provider |
|
|
236
|
+
|------|--------|----------|
|
|
237
|
+
| `claude` | `claude` | `anthropic` |
|
|
238
|
+
| `codex` | `codex` | `openai` |
|
|
239
|
+
| `cursor` | `cursor-agent` | `multi` |
|
|
240
|
+
|
|
241
|
+
#### `forProvider(provider)` — Provider-Aware Backend Selection
|
|
242
|
+
|
|
243
|
+
The primary entry point for choosing a backend by AI provider. The orchestrator and any other consumer should call this instead of implementing their own discovery logic.
|
|
244
|
+
|
|
245
|
+
Resolution order:
|
|
246
|
+
1. **Exact match** — backend whose `provider` matches (e.g. `claude` for `'anthropic'`)
|
|
247
|
+
2. **Multi-provider fallback** — `cursor` (runs any provider via cursor-agent)
|
|
248
|
+
3. **Default fallback** — `defaultBackend()` chain (claude → cursor → first)
|
|
249
|
+
|
|
250
|
+
This example shows how cursor-agent acts as a transparent fallback when the `claude` binary is absent or its API endpoint is unreachable:
|
|
251
|
+
|
|
252
|
+
```js
|
|
253
|
+
const registry = new BackendRegistry();
|
|
254
|
+
await registry.discover();
|
|
255
|
+
|
|
256
|
+
// When claude is installed and reachable:
|
|
257
|
+
registry.forProvider('anthropic'); // 'claude'
|
|
258
|
+
registry.forProvider('openai'); // 'codex'
|
|
259
|
+
|
|
260
|
+
// When only cursor-agent is installed (VPN down, or no claude/codex):
|
|
261
|
+
registry.forProvider('anthropic'); // 'cursor' — runs sonnet-4.5
|
|
262
|
+
registry.forProvider('openai'); // 'cursor' — runs gpt-5.2
|
|
263
|
+
```
|
|
264
|
+
|
|
265
|
+
### Factory
|
|
266
|
+
|
|
267
|
+
Centralizes the agent creation pipeline: discover → acquire identity → create working directory → select backend → instantiate. This eliminates duplication between the orchestrator's `enlist()` and `rework()` paths and ensures consistent backend selection via `forProvider()`.
|
|
268
|
+
|
|
269
|
+
The factory **creates** agents but does not **spawn** them. Spawning and registration is the caller's responsibility.
|
|
270
|
+
|
|
271
|
+
```js
|
|
272
|
+
import { Factory, BackendRegistry } from 'loreli/agent';
|
|
273
|
+
import { Registry } from 'loreli/identity';
|
|
274
|
+
import { Config } from 'loreli/config';
|
|
275
|
+
|
|
276
|
+
const config = new Config();
|
|
277
|
+
await config.load(hub, 'owner/repo');
|
|
278
|
+
|
|
279
|
+
const factory = new Factory({
|
|
280
|
+
backends: new BackendRegistry(),
|
|
281
|
+
identities: new Registry(),
|
|
282
|
+
config
|
|
283
|
+
});
|
|
284
|
+
|
|
285
|
+
// Create an action agent for the anthropic side
|
|
286
|
+
const agent = await factory.create('anthropic', 'action', {
|
|
287
|
+
theme: 'transformers',
|
|
288
|
+
model: 'balanced'
|
|
289
|
+
});
|
|
290
|
+
|
|
291
|
+
// agent.state === 'idle' — caller spawns when ready
|
|
292
|
+
await agent.spawn();
|
|
293
|
+
```
|
|
294
|
+
|
|
295
|
+
The factory threads `config` to each backend constructor for config-driven model resolution. A per-create config override is also supported via `opts.config`.
|
|
296
|
+
|
|
297
|
+
### Output Utilities
|
|
298
|
+
|
|
299
|
+
Agent output processing: ANSI stripping, truncation, and cleaning.
|
|
300
|
+
|
|
301
|
+
```js
|
|
302
|
+
import { output } from 'loreli/agent';
|
|
303
|
+
|
|
304
|
+
const raw = await agent.capture();
|
|
305
|
+
const cleaned = output.clean(raw); // strip ANSI + truncate to 12000 chars
|
|
306
|
+
const stripped = output.strip(raw); // strip ANSI only
|
|
307
|
+
const short = output.truncate(raw, 5000); // truncate only, custom limit
|
|
308
|
+
```
|
|
309
|
+
|
|
310
|
+
### Workspace Preparation Re-export
|
|
311
|
+
|
|
312
|
+
`loreli/agent` re-exports `prepare` from `loreli/workspace` for callers that build agents and workspaces together.
|
|
313
|
+
|
|
314
|
+
```js
|
|
315
|
+
import { prepare } from 'loreli/agent';
|
|
316
|
+
|
|
317
|
+
await prepare('~/.loreli/workspaces/loreli-optimus-0', {
|
|
318
|
+
session: 's1',
|
|
319
|
+
agent: 'optimus-0',
|
|
320
|
+
repo: 'owner/repo'
|
|
321
|
+
});
|
|
322
|
+
```
|
|
323
|
+
|
|
324
|
+
### Model Aliases
|
|
325
|
+
|
|
326
|
+
Loreli provides human-friendly model aliases (`fast`, `balanced`, `powerful`) that resolve to backend-specific and provider-specific model identifiers. Resolution combines config overrides, runtime discovery, and static fallbacks to always produce a valid model ID.
|
|
327
|
+
|
|
328
|
+
The `resolve()` function takes an alias, backend name, provider, optional config, and optional discovery cache. Exact model strings (not matching any alias) are returned unchanged.
|
|
329
|
+
|
|
330
|
+
The following example demonstrates basic alias resolution using built-in defaults. Each backend has its own model mappings — the claude backend resolves to provider-specific model IDs, while the cursor backend resolves to cursor-agent short names:
|
|
331
|
+
|
|
332
|
+
```js
|
|
333
|
+
import { models } from 'loreli/agent';
|
|
334
|
+
|
|
335
|
+
models.resolve('fast', 'claude', 'anthropic'); // 'claude-haiku-4-5-20251001'
|
|
336
|
+
models.resolve('fast', 'codex', 'openai'); // 'gpt-5-mini'
|
|
337
|
+
models.resolve('fast', 'cursor', 'anthropic'); // 'sonnet-4.5'
|
|
338
|
+
models.resolve('gpt-custom', 'codex', 'openai'); // 'gpt-custom' (passthrough)
|
|
339
|
+
```
|
|
340
|
+
|
|
341
|
+
The following example demonstrates overriding model IDs via config. This is useful when your environment routes through a LiteLLM proxy or you have access to newer model versions:
|
|
342
|
+
|
|
343
|
+
```js
|
|
344
|
+
import { models } from 'loreli/agent';
|
|
345
|
+
import { Config } from 'loreli/config';
|
|
346
|
+
|
|
347
|
+
const config = new Config();
|
|
348
|
+
config.file = {
|
|
349
|
+
backends: {
|
|
350
|
+
codex: {
|
|
351
|
+
models: {
|
|
352
|
+
fast: { openai: 'my-custom-gpt' }
|
|
353
|
+
}
|
|
354
|
+
}
|
|
355
|
+
}
|
|
356
|
+
};
|
|
357
|
+
|
|
358
|
+
models.resolve('fast', 'codex', 'openai', config); // 'my-custom-gpt'
|
|
359
|
+
models.resolve('balanced', 'codex', 'openai', config); // 'gpt-5.1-codex' (falls to defaults)
|
|
360
|
+
```
|
|
361
|
+
|
|
362
|
+
#### Resolution Chain
|
|
363
|
+
|
|
364
|
+
Model resolution follows a four-layer priority chain:
|
|
365
|
+
|
|
366
|
+
1. **Config override** — `backends.{name}.models.{alias}.{provider}` from `loreli.yml` or `config.merge()`. Always wins. This is the escape hatch for LiteLLM proxies, private deployments, and custom model names.
|
|
367
|
+
2. **Runtime discovery** — models discovered at startup from backend CLIs and configured proxy endpoints. `cursor-agent` uses `--list-models`. `claude`/`codex` use OpenAI-compatible model listing (`/v1/models` with `/models` fallback) when `ANTHROPIC_BASE_URL`/`OPENAI_BASE_URL` is configured. Discovered models are classified into tiers by name-pattern heuristics and cached on the `BackendRegistry`.
|
|
368
|
+
3. **Static fallbacks** — built-in defaults from `defaults.js`. When discovery data is available, static fallbacks are validated against the discovered model list. Invalid models trigger a warning and fall back to the backend's default discovered model.
|
|
369
|
+
4. **Pass-through** — exact model strings (those not matching any alias) bypass resolution entirely.
|
|
370
|
+
|
|
371
|
+
#### Auto Model Discovery
|
|
372
|
+
|
|
373
|
+
At startup, `BackendRegistry.discover()` probes available backends for their supported models:
|
|
374
|
+
|
|
375
|
+
| Backend | Discovery Method | Behavior |
|
|
376
|
+
|---------|-----------------|----------|
|
|
377
|
+
| `cursor-agent` | `--list-models` CLI flag | Parses structured output, classifies into tiers per provider |
|
|
378
|
+
| `claude` | Proxy model listing (`/v1/models` / `/models`) when `ANTHROPIC_BASE_URL` is configured | Auth uses `ANTHROPIC_API_KEY` first, then `OPENAI_API_KEY`; if discovery is unavailable, static defaults are used |
|
|
379
|
+
| `codex` | Proxy model listing (`/v1/models` / `/models`) when `OPENAI_BASE_URL` is configured | Auth uses `OPENAI_API_KEY` first, then `ANTHROPIC_API_KEY`; if discovery is unavailable, static defaults are used |
|
|
380
|
+
|
|
381
|
+
Proxy requests use `timeouts.proxyDiscovery` from config (default `5000` ms). This controls the per-request HTTP timeout for `/v1/models` / `/models` discovery calls.
|
|
382
|
+
|
|
383
|
+
For all runtime-discovered models, tiers are inferred from name-pattern heuristics:
|
|
384
|
+
|
|
385
|
+
| Tier | Patterns |
|
|
386
|
+
|------|----------|
|
|
387
|
+
| `powerful` | `-xhigh`, `-max`, `-high`, `opus-*`, `o3` |
|
|
388
|
+
| `balanced` | No tier suffix, `-thinking` (non-opus), bare codex |
|
|
389
|
+
| `fast` | `-low`, `-mini`, `haiku-*`, `o4-mini` |
|
|
390
|
+
|
|
391
|
+
Discovery results override static defaults but are themselves overridden by explicit config. When discovery fails or is unavailable (including direct non-proxy setups), static fallbacks are used unchanged.
|
|
392
|
+
|
|
393
|
+
#### Validation
|
|
394
|
+
|
|
395
|
+
When discovery data is available for a backend, resolved model IDs are validated against the discovered model list. If a resolved model (from static defaults or config) is not found in the discovered list, a warning is logged and the backend's default model is used instead. This catches:
|
|
396
|
+
|
|
397
|
+
- Stale static defaults (model IDs removed by providers)
|
|
398
|
+
- Typos in `loreli.yml` backend model overrides
|
|
399
|
+
- Models no longer available in the current subscription/plan
|
|
400
|
+
|
|
401
|
+
When no discovery data is available (discovery failed, backend doesn't support it, or backend not installed), validation is skipped and models pass through as before.
|
|
402
|
+
|
|
403
|
+
#### Static Default Mappings
|
|
404
|
+
|
|
405
|
+
These are the built-in fallback model IDs used when neither config nor discovery provides a value:
|
|
406
|
+
|
|
407
|
+
**Claude backend** (Anthropic model IDs):
|
|
408
|
+
|
|
409
|
+
| Alias | Anthropic |
|
|
410
|
+
|-------|-----------|
|
|
411
|
+
| `fast` | `claude-haiku-4-5-20251001` |
|
|
412
|
+
| `balanced` | `claude-sonnet-4-5-20250929` |
|
|
413
|
+
| `powerful` | `claude-opus-4-5-20251101` |
|
|
414
|
+
|
|
415
|
+
**Codex backend** (OpenAI model IDs):
|
|
416
|
+
|
|
417
|
+
| Alias | OpenAI |
|
|
418
|
+
|-------|--------|
|
|
419
|
+
| `fast` | `gpt-5-mini` |
|
|
420
|
+
| `balanced` | `gpt-5.1-codex` |
|
|
421
|
+
| `powerful` | `gpt-5.2-pro` |
|
|
422
|
+
|
|
423
|
+
**Cursor backend** (cursor-agent short names):
|
|
424
|
+
|
|
425
|
+
| Alias | Anthropic | OpenAI |
|
|
426
|
+
|-------|-----------|--------|
|
|
427
|
+
| `fast` | `sonnet-4.5` | `gpt-5.3-codex-low` |
|
|
428
|
+
| `balanced` | `sonnet-4.5-thinking` | `gpt-5.3-codex` |
|
|
429
|
+
| `powerful` | `opus-4.6-thinking` | `gpt-5.1-codex-max` |
|
|
430
|
+
|
|
431
|
+
#### LiteLLM / Proxy Override
|
|
432
|
+
|
|
433
|
+
When backends are behind a LiteLLM proxy or custom gateway, model discovery will query the configured base URL and validate resolved IDs against the returned model list. If your proxy uses custom aliases, override via `loreli.yml`:
|
|
434
|
+
|
|
435
|
+
```yaml
|
|
436
|
+
backends:
|
|
437
|
+
claude:
|
|
438
|
+
env:
|
|
439
|
+
ANTHROPIC_BASE_URL: https://your-litellm.example.com/v1
|
|
440
|
+
models:
|
|
441
|
+
fast:
|
|
442
|
+
anthropic: litellm/haiku
|
|
443
|
+
balanced:
|
|
444
|
+
anthropic: litellm/sonnet
|
|
445
|
+
powerful:
|
|
446
|
+
anthropic: litellm/opus
|
|
447
|
+
codex:
|
|
448
|
+
env:
|
|
449
|
+
OPENAI_BASE_URL: https://your-litellm.example.com/v1
|
|
450
|
+
models:
|
|
451
|
+
fast:
|
|
452
|
+
openai: litellm/gpt-mini
|
|
453
|
+
```
|
|
454
|
+
|
|
455
|
+
Config overrides always take precedence — discovery and static defaults are bypassed entirely when `backends.{name}.models` is configured.
|
|
456
|
+
|
|
457
|
+
### Backend Environment Variables
|
|
458
|
+
|
|
459
|
+
The `models.env()` function collects environment variables for a backend's launcher script. It merges two layers:
|
|
460
|
+
|
|
461
|
+
1. **Inherited** — `process.env` vars matching the backend's known prefixes (e.g. `ANTHROPIC_*`, `CLAUDE_*` for the `claude` backend) are collected automatically
|
|
462
|
+
2. **Config overrides** — `backends.{name}.env` from `loreli.yml` or `config.merge()` take precedence on key collision
|
|
463
|
+
|
|
464
|
+
This ensures critical variables like `ANTHROPIC_BASE_URL` (proxy URL), `ANTHROPIC_AUTH_TOKEN`, and `CLAUDE_CODE_DISABLE_EXPERIMENTAL_BETAS` are forwarded from the orchestrator's `process.env` into tmux-spawned agents — even when the tmux server's environment doesn't have them.
|
|
465
|
+
|
|
466
|
+
The following example demonstrates how `env()` collects process.env vars and merges with config overrides. This is what each backend calls in its constructor to populate `this._env`:
|
|
467
|
+
|
|
468
|
+
```js
|
|
469
|
+
import { models } from 'loreli/agent';
|
|
470
|
+
|
|
471
|
+
// With ANTHROPIC_BASE_URL set in process.env:
|
|
472
|
+
const vars = models.env('claude');
|
|
473
|
+
// { ANTHROPIC_BASE_URL: '...', ANTHROPIC_AUTH_TOKEN: '...', CLAUDE_CODE_DISABLE_EXPERIMENTAL_BETAS: '...' }
|
|
474
|
+
|
|
475
|
+
// Config overrides take precedence:
|
|
476
|
+
const config = new Config();
|
|
477
|
+
config.file = { backends: { claude: { env: { ANTHROPIC_BASE_URL: 'https://override.example.com' } } } };
|
|
478
|
+
const vars2 = models.env('claude', config);
|
|
479
|
+
// { ANTHROPIC_BASE_URL: 'https://override.example.com', ...inherited }
|
|
480
|
+
```
|
|
481
|
+
|
|
482
|
+
| Backend | Inherited Prefixes |
|
|
483
|
+
|---------|-------------------|
|
|
484
|
+
| `claude` | `ANTHROPIC_*`, `CLAUDE_*` |
|
|
485
|
+
| `codex` | `OPENAI_*`, `CODEX_*` |
|
|
486
|
+
| `cursor` | `ANTHROPIC_*`, `OPENAI_*`, `CLAUDE_*`, `CURSOR_*` |
|
|
487
|
+
|
|
488
|
+
Returns `undefined` when no matching vars exist in either layer.
|
|
489
|
+
|
|
490
|
+
### Formatting Env for Launcher Scripts
|
|
491
|
+
|
|
492
|
+
The `models.format()` function converts an env object into shell `export` lines for launcher scripts. Values are single-quoted to prevent shell expansion.
|
|
493
|
+
|
|
494
|
+
```js
|
|
495
|
+
import { models } from 'loreli/agent';
|
|
496
|
+
|
|
497
|
+
models.format({ ANTHROPIC_BASE_URL: 'https://proxy.example.com', FOO: 'bar' });
|
|
498
|
+
// "export ANTHROPIC_BASE_URL='https://proxy.example.com'\nexport FOO='bar'\n"
|
|
499
|
+
|
|
500
|
+
models.format(undefined); // '' (empty string)
|
|
501
|
+
models.format({}); // '' (empty string)
|
|
502
|
+
```
|
|
503
|
+
|
|
504
|
+
All three CLI backends (`ClaudeBackend`, `CodexBackend`, `CursorBackend`) use `format(this._env)` when writing their launcher scripts.
|
|
505
|
+
|
|
506
|
+
### Model Display Names

Convert full API model identifiers to human-readable labels using a date-stripping heuristic: any trailing `-YYYYMMDD` date suffix is removed, and identifiers without one are returned unchanged.

```js
import { models } from 'loreli/agent';

models.display('claude-haiku-4-5-20251001');  // 'claude-haiku-4-5'
models.display('claude-sonnet-4-5-20250929'); // 'claude-sonnet-4-5'
models.display('gpt-5-mini');                 // 'gpt-5-mini'
models.display('o3');                         // 'o3'

// Unknown models: strips trailing date suffix
models.display('claude-sonnet-5-20260101');   // 'claude-sonnet-5'

// No date suffix: returned unchanged
models.display('gemini-3-pro');               // 'gemini-3-pro'
```

| Full Identifier | Display Name |
|----------------|-------------|
| `claude-haiku-4-5-20251001` | `claude-haiku-4-5` |
| `claude-sonnet-4-5-20250929` | `claude-sonnet-4-5` |
| `claude-opus-4-5-20251101` | `claude-opus-4-5` |
| `gpt-5-mini` | `gpt-5-mini` |
| `gpt-5.1-codex` | `gpt-5.1-codex` |
| `gpt-5.2-pro` | `gpt-5.2-pro` |
## Fallback Strategies

Backend selection is handled entirely by `BackendRegistry.forProvider()`. The orchestrator never implements its own discovery logic.

| Environment | Strategy |
|-------------|----------|
| claude + codex installed | Yin/Yang: dedicated CLIs per provider |
| Only cursor-agent installed | Yin/Yang: cursor-agent runs both sides with different models |
| Mixed (e.g. claude + cursor-agent) | Exact match first, cursor fills the gap |
| Nothing available | Error with installation guidance |
## One-Shot LLM Calls (`oneshot`)

Each backend provides a static `oneshot(prompt, opts)` method for non-interactive LLM calls. This uses the CLI's print/exec mode (`child_process.execFile`) — no tmux, no workspace, no agent lifecycle. It reuses the same model resolution and environment variable collection as interactive agents.

The `BackendRegistry` also exposes a convenience `oneshot()` that discovers once, orders backends with discovery-backed entries first, and falls back across the remaining oneshot-capable backends when one fails.

### Usage

```js
import { BackendRegistry, ClaudeBackend } from 'loreli/agent';

// Via specific backend
const answer = await ClaudeBackend.oneshot('Summarize this text...', {
  model: 'fast',
  timeout: 30000
});

// Via registry (discovery-first ordering + fallback)
const backends = new BackendRegistry();
await backends.discover();
const result = await backends.oneshot('Classify this output...', { model: 'fast' });
```
### Backend CLI Flags

| Backend | Binary | Print Mode |
|---------|--------|------------|
| Claude | `claude` | `-p <prompt> --model <model> --output-format text` |
| Codex | `codex` | `exec --ephemeral --skip-git-repo-check <prompt> -m <model>` |
| Cursor | `cursor-agent` | `-p <prompt> --model <model> --output-format text` |
### API

#### `Backend.oneshot(prompt, opts)` (static)

| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `prompt` | `string` | — | Text prompt to send. |
| `opts.model` | `string` | `'fast'` | Model alias or exact string. |
| `opts.config` | `Config` | `undefined` | Config instance for model resolution. |
| `opts.timeout` | `number` | `30000` | Max execution time in ms. |

**Returns:** `Promise<string>` — LLM response text (trimmed).

#### `BackendRegistry.oneshot(prompt, opts)`

Same parameters as above. Runs discovery once (with optional `opts.config`), then tries backends in order until one succeeds.
## Session Persistence

Agents are spawned detached — they survive orchestrator shutdown. State is persisted to `~/.loreli/sessions/<id>/`:

```
~/.loreli/sessions/<id>/
  config.json          (repo, theme, strategy)
  agents/
    optimus-0.json     (identity, state, paneId)
    registry.json      (name tracking)
  logs/
    orchestrator.log
    optimus-0.log
```