loreli 0.0.0 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +1 -1
- package/README.md +710 -97
- package/bin/loreli.js +89 -0
- package/package.json +77 -14
- package/packages/README.md +101 -0
- package/packages/action/README.md +98 -0
- package/packages/action/prompts/action.md +172 -0
- package/packages/action/src/index.js +684 -0
- package/packages/agent/README.md +606 -0
- package/packages/agent/src/backends/claude.js +387 -0
- package/packages/agent/src/backends/codex.js +351 -0
- package/packages/agent/src/backends/cursor.js +371 -0
- package/packages/agent/src/backends/index.js +486 -0
- package/packages/agent/src/base.js +138 -0
- package/packages/agent/src/cli.js +275 -0
- package/packages/agent/src/discover.js +396 -0
- package/packages/agent/src/factory.js +124 -0
- package/packages/agent/src/index.js +12 -0
- package/packages/agent/src/models.js +159 -0
- package/packages/agent/src/output.js +62 -0
- package/packages/agent/src/session.js +162 -0
- package/packages/agent/src/trace.js +186 -0
- package/packages/classify/README.md +136 -0
- package/packages/classify/prompts/blocker.md +12 -0
- package/packages/classify/prompts/feedback.md +14 -0
- package/packages/classify/prompts/pane-state.md +20 -0
- package/packages/classify/src/index.js +81 -0
- package/packages/config/README.md +898 -0
- package/packages/config/src/defaults.js +145 -0
- package/packages/config/src/index.js +223 -0
- package/packages/config/src/schema.js +291 -0
- package/packages/config/src/validate.js +160 -0
- package/packages/context/README.md +165 -0
- package/packages/context/src/index.js +198 -0
- package/packages/hub/README.md +338 -0
- package/packages/hub/src/base.js +154 -0
- package/packages/hub/src/github.js +1597 -0
- package/packages/hub/src/index.js +79 -0
- package/packages/hub/src/labels.js +48 -0
- package/packages/identity/README.md +288 -0
- package/packages/identity/src/index.js +620 -0
- package/packages/identity/src/themes/avatar.js +217 -0
- package/packages/identity/src/themes/digimon.js +217 -0
- package/packages/identity/src/themes/dragonball.js +217 -0
- package/packages/identity/src/themes/lotr.js +217 -0
- package/packages/identity/src/themes/marvel.js +217 -0
- package/packages/identity/src/themes/pokemon.js +217 -0
- package/packages/identity/src/themes/starwars.js +217 -0
- package/packages/identity/src/themes/transformers.js +217 -0
- package/packages/identity/src/themes/zelda.js +217 -0
- package/packages/knowledge/README.md +217 -0
- package/packages/knowledge/src/index.js +243 -0
- package/packages/log/README.md +93 -0
- package/packages/log/src/index.js +252 -0
- package/packages/marker/README.md +200 -0
- package/packages/marker/src/index.js +184 -0
- package/packages/mcp/README.md +323 -0
- package/packages/mcp/instructions.md +126 -0
- package/packages/mcp/scaffolding/.agents/skills/loreli-context/SKILL.md +89 -0
- package/packages/mcp/scaffolding/ISSUE_TEMPLATE/config.yml +2 -0
- package/packages/mcp/scaffolding/ISSUE_TEMPLATE/loreli.yml +83 -0
- package/packages/mcp/scaffolding/loreli.yml +491 -0
- package/packages/mcp/scaffolding/mcp-configs/.codex/config.toml +4 -0
- package/packages/mcp/scaffolding/mcp-configs/.cursor/mcp.json +14 -0
- package/packages/mcp/scaffolding/mcp-configs/.mcp.json +14 -0
- package/packages/mcp/scaffolding/pull-request.md +23 -0
- package/packages/mcp/src/index.js +600 -0
- package/packages/mcp/src/tools/agent-context.js +44 -0
- package/packages/mcp/src/tools/agents.js +450 -0
- package/packages/mcp/src/tools/context.js +200 -0
- package/packages/mcp/src/tools/github.js +1163 -0
- package/packages/mcp/src/tools/hitl.js +162 -0
- package/packages/mcp/src/tools/index.js +18 -0
- package/packages/mcp/src/tools/refactor.js +227 -0
- package/packages/mcp/src/tools/repo.js +44 -0
- package/packages/mcp/src/tools/start.js +904 -0
- package/packages/mcp/src/tools/status.js +149 -0
- package/packages/mcp/src/tools/work.js +134 -0
- package/packages/orchestrator/README.md +192 -0
- package/packages/orchestrator/src/index.js +1492 -0
- package/packages/planner/README.md +251 -0
- package/packages/planner/prompts/plan-reviewer.md +109 -0
- package/packages/planner/prompts/planner.md +191 -0
- package/packages/planner/prompts/tiebreaker-reviewer.md +71 -0
- package/packages/planner/src/index.js +1381 -0
- package/packages/review/README.md +129 -0
- package/packages/review/prompts/reviewer.md +158 -0
- package/packages/review/src/index.js +1403 -0
- package/packages/risk/README.md +178 -0
- package/packages/risk/prompts/risk.md +272 -0
- package/packages/risk/src/index.js +439 -0
- package/packages/session/README.md +165 -0
- package/packages/session/src/index.js +215 -0
- package/packages/test-utils/README.md +96 -0
- package/packages/test-utils/src/index.js +354 -0
- package/packages/tmux/README.md +261 -0
- package/packages/tmux/src/index.js +501 -0
- package/packages/workflow/README.md +317 -0
- package/packages/workflow/prompts/preamble.md +14 -0
- package/packages/workflow/src/index.js +660 -0
- package/packages/workflow/src/proof-of-life.js +74 -0
- package/packages/workspace/README.md +143 -0
- package/packages/workspace/src/index.js +1127 -0
- package/index.js +0 -8
|
@@ -0,0 +1,149 @@
|
|
|
1
|
+
import { has } from 'loreli/marker';
import { select } from './repo.js';
import { check } from 'loreli/config';

export default {
  team_status: {
    title: 'Team Status',
    description: 'Dashboard showing open issues, active PRs, agent states, review loops, merged count, and PRs awaiting human review.',
    schema: {
      type: 'object',
      properties: {
        repo: { type: 'string', description: 'Target repository (owner/name).' }
      }
    },

    /**
     * Render the team status dashboard: agent states (with terminal capture),
     * live GitHub counts, PRs awaiting human review, and rate-limit status.
     *
     * @param {object} args - Tool arguments.
     * @param {object} ctx - Execution context.
     * @returns {Promise<object>} Status dashboard.
     */
    async exec(args, ctx) {
      // Validate repo format eagerly before any other guard so callers always
      // get a thrown error on malformed input regardless of hub state.
      if (args.repo) check.repo(args.repo);

      // When the user explicitly requests a repo but hub is not initialized,
      // tell them to run start. This matches the pattern in start_work
      // and start_planning. We only guard on args.repo (explicit request),
      // not ctx.repo (set during start) — agent/storage status still
      // works without GitHub.
      if (args.repo && !ctx.hub) {
        return { content: [{ type: 'text', text: 'Run start first to initialize the hub.' }], isError: true };
      }

      const repo = select(args.repo, ctx);
      const lines = [`Team Status for ${repo ?? 'unknown repo'}`];

      // --- Agent States ---
      const orch = ctx.orchestrator;
      if (orch?.reconcile) await orch.reconcile();
      const agents = orch ? [...orch.agents.entries()] : [];
      lines.push('', `Agents: ${agents.length}`);

      for (const [name, agent] of agents) {
        lines.push(` ${name} | ${agent.role ?? '?'} | ${agent.state ?? '?'} | ${agent.identity?.provider ?? '?'}`);

        // Terminal capture is best-effort: a dead pane must not break status.
        try {
          if (typeof agent.capture === 'function') {
            const output = (await agent.capture(20))?.trim();
            if (output) {
              lines.push(` Terminal:\n${output.split('\n').map(function indent(l) { return ` ${l}`; }).join('\n')}`);
            } else {
              lines.push(' Terminal: (no output)');
            }
          }
        } catch { lines.push(' Terminal: (unavailable)'); }
      }

      // --- Live GitHub data (only if hub is available) ---
      // Keep a reference to the fetched open-PR list so the HITL section
      // below can reuse it instead of issuing a second identical API call
      // (saves GitHub rate limit). Stays null if this fetch fails.
      let openPRs = null;
      if (ctx.hub && repo) {
        try {
          const [openIssues, fetchedOpen, closedPRs] = await Promise.all([
            ctx.hub.issues(repo, { state: 'open' }),
            ctx.hub.pulls(repo, { state: 'open' }),
            ctx.hub.pulls(repo, { state: 'closed' })
          ]);
          openPRs = fetchedOpen;

          const merged = closedPRs.filter(function isMerged(pr) { return pr.merged; });

          lines.push('', `Open issues: ${openIssues.length}`);
          lines.push(`Open PRs: ${openPRs.length}`);
          lines.push(`Merged PRs: ${merged.length}`);

          // Match PRs to agents by branch naming convention: {agent-name}/issue-{number}
          if (openPRs.length) {
            lines.push('', 'Active PRs:');
            for (const pr of openPRs) {
              const branchMatch = pr.head.match(/^(.+)\/issue-(\d+)$/);
              const agentName = branchMatch ? branchMatch[1] : 'unknown';
              const issueNum = branchMatch ? `#${branchMatch[2]}` : '?';

              // Count review rounds
              let reviewCount = 0;
              try {
                const revs = await ctx.hub.reviews(repo, pr.number);
                reviewCount = revs.length;
              } catch { /* best-effort */ }

              lines.push(` PR #${pr.number}: ${pr.title} (agent: ${agentName}, issue: ${issueNum}, reviews: ${reviewCount})`);
            }
          }
        } catch {
          lines.push('', 'GitHub data: unavailable (hub error)');
        }
      }

      // --- Awaiting human review (from GitHub) ---
      // Derive HITL state from GitHub labels and markers instead of local
      // session storage. Any participant can see PRs awaiting human
      // attention via the loreli:needs-attention label and hitl marker.
      if (ctx.hub && repo) {
        try {
          // Reuse the open-PR list fetched above when available; refetch only
          // if the earlier Promise.all failed, so this section remains
          // independently best-effort.
          const prs = openPRs ?? await ctx.hub.pulls(repo, { state: 'open' });
          const needsAttention = prs.filter(function isGated(pr) {
            return pr.labels?.some(function isNA(l) {
              // Labels may be objects ({ name }) or plain strings.
              return (l.name ?? l) === 'loreli:needs-attention';
            });
          });

          if (needsAttention.length) {
            lines.push('', `Awaiting human review: ${needsAttention.length}`);
            for (const pr of needsAttention) {
              // Minutes elapsed since the hitl marker comment, 0 if unknown.
              let elapsed = 0;
              try {
                const comments = await ctx.hub.comments(repo, pr.number);
                const hitlComment = comments.find(function isHitl(c) {
                  return has(c.body, 'hitl');
                });
                if (hitlComment) {
                  // Tolerate both normalized (created) and raw GitHub
                  // (created_at) timestamp field names.
                  const ts = hitlComment.created ?? hitlComment.created_at;
                  elapsed = Math.round((Date.now() - new Date(ts).getTime()) / 60000);
                }
              } catch { /* best-effort */ }

              // Extract reviewers from PR requested_reviewers if available
              const reviewers = pr.requested_reviewers?.map(function name(r) { return r.login ?? r; }) ?? [];

              lines.push(` PR #${pr.number}: ${pr.title} — reviewers: ${reviewers.join(', ') || 'none'} — waiting: ${elapsed}m`);
            }
          }
        } catch { /* best-effort — fall through */ }
      }

      // --- Rate Limit ---
      if (ctx.hub?.rates) {
        const rl = ctx.hub.rates();
        if (rl.remaining !== null) {
          const pct = rl.ratio !== null ? ` (${Math.round(rl.ratio * 100)}%)` : '';
          const resetStr = rl.reset ? ` resets ${rl.reset}` : '';
          lines.push('', `Rate limit: ${rl.remaining}/${rl.limit}${pct}${resetStr}`);
        }
      }

      return {
        content: [{ type: 'text', text: lines.join('\n') }]
      };
    }
  }
};
|
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
import { logger } from 'loreli/log';
import { select } from './repo.js';

const log = logger('work');

/**
 * Work orchestration tools.
 *
 * These tools drive the plan-action-review loop by delegating to
 * autonomous agents. Agents have their own GitHub access and interact
 * with the repo directly; the orchestrator monitors via GitHub activity
 * and tmux capture.
 */
export default {
  start_planning: {
    title: 'Start Planning',
    description: 'Activate planner agent(s) to analyze the repository and create draft work items on the GitHub project board.',
    schema: {
      type: 'object',
      properties: {
        repo: { type: 'string', description: 'Target repository (owner/name).' },
        objective: { type: 'string', description: 'What should the planner analyze and plan for?' }
      },
      required: ['objective']
    },

    /**
     * Dispatch planner agents against an objective and start the reactor.
     *
     * @param {object} args - Tool arguments.
     * @param {object} ctx - Execution context.
     * @returns {Promise<object>} Planning initiation result.
     */
    async exec(args, ctx) {
      const repo = select(args.repo, ctx);
      const objective = args.objective;
      if (!repo) {
        return {
          content: [{ type: 'text', text: 'No repository configured. Pass --repo, run start, set LORELI_REPO, or set repo in loreli.yml.' }],
          isError: true
        };
      }

      if (!ctx.hub) {
        return { content: [{ type: 'text', text: 'Run start first to initialize the hub.' }], isError: true };
      }

      log.info(`start_planning: ${repo} — ${objective}`);

      // Delegate to planner workflow which handles category discovery, prompt building, and dispatch
      const result = await ctx.planner.plan(repo, objective);

      // Persist categoryId in context for downstream tools (escalate, etc.)
      ctx.categoryId = result.categoryId;

      // Start the reactor loop so planner handlers (review, revise, promote)
      // run automatically. Without this, discussions sit unreviewed forever.
      ctx.orchestrator.watch(repo);
      ctx.orchestrator.monitor();

      return {
        content: [{
          type: 'text',
          text: [
            `Planning initiated for ${repo}`,
            `Objective: ${objective}`,
            `Category: Loreli (${result.categoryId})`,
            `Planners dispatched: ${result.planners.join(', ')}`,
            'Plan review and promotion will run automatically via the reactor tick pipeline.'
          ].filter(Boolean).join('\n')
        }]
      };
    }
  },

  start_work: {
    title: 'Start Work',
    description: 'Begin the action/review cycle. Action agents claim issues, do work, and create PRs. Review agents are auto-assigned based on yin/yang pairing.',
    schema: {
      type: 'object',
      properties: {
        repo: { type: 'string', description: 'Target repository (owner/name).' }
      }
    },

    /**
     * Assign open issues to action agents and start monitoring/reacting.
     *
     * @param {object} args - Tool arguments.
     * @param {object} ctx - Execution context.
     * @returns {Promise<object>} Work cycle initiation result.
     */
    async exec(args, ctx) {
      const repo = select(args.repo, ctx);
      if (!repo) {
        return {
          content: [{ type: 'text', text: 'No repository configured. Pass --repo, run start, set LORELI_REPO, or set repo in loreli.yml.' }],
          isError: true
        };
      }

      if (!ctx.hub) {
        return { content: [{ type: 'text', text: 'Run start first to initialize the hub.' }], isError: true };
      }

      const actionAgents = ctx.action.agents?.() ?? [];
      if (!actionAgents.length) {
        // Return an isError tool result — not a thrown Error — so this guard
        // has the same failure shape as the repo/hub guards above and in
        // start_planning.
        return { content: [{ type: 'text', text: 'No action agents available. Add an agent first.' }], isError: true };
      }

      log.info(`start_work: ${repo}`);

      // Delegate to action workflow which handles claim, label, prompt, dispatch
      const assignments = await ctx.action.work(repo);

      // Start stall detection monitor and the PR lifecycle reactor
      ctx.orchestrator.monitor();
      ctx.orchestrator.watch(repo);

      return {
        content: [{
          type: 'text',
          text: [
            `Work cycle started for ${repo}`,
            `Assigned: ${assignments.length}`,
            '',
            ...assignments.map(function format(a) {
              return `#${a.issue} -> ${a.agent} (reviewer: ${a.reviewer ?? 'none'})`;
            })
          ].join('\n')
        }]
      };
    }
  },

  // propose and escalate have been consolidated into the `plan` agent tool
  // in packages/mcp/src/tools/github.js — see plan action=create and action=escalate
};
|
|
@@ -0,0 +1,192 @@
|
|
|
1
|
+
# loreli/orchestrator
|
|
2
|
+
|
|
3
|
+
Generic agent lifecycle coordination via EventEmitter. Manages agent spawn/shutdown/kill, reactor polling, stall detection, and activity tracking. Contains zero role-specific logic — all planner/action/review behavior lives in the role packages.
|
|
4
|
+
|
|
5
|
+
## Research Findings
|
|
6
|
+
|
|
7
|
+
Node.js built-in `EventEmitter` provides everything needed for lifecycle event dispatch. No external libraries required.
|
|
8
|
+
|
|
9
|
+
## API Reference
|
|
10
|
+
|
|
11
|
+
### `Orchestrator` (extends EventEmitter)
|
|
12
|
+
|
|
13
|
+
```js
|
|
14
|
+
import { Orchestrator } from 'loreli/orchestrator';
|
|
15
|
+
|
|
16
|
+
const orch = new Orchestrator({
|
|
17
|
+
hub, identityRegistry, backendRegistry, storage, config
|
|
18
|
+
});
|
|
19
|
+
```
|
|
20
|
+
|
|
21
|
+
#### Constructor Options
|
|
22
|
+
|
|
23
|
+
| Option | Type | Description |
|
|
24
|
+
|--------|------|-------------|
|
|
25
|
+
| `hub` | `BaseHub` | Git hosting abstraction |
|
|
26
|
+
| `identityRegistry` | `Registry` | Identity assignment and pairing |
|
|
27
|
+
| `backendRegistry` | `BackendRegistry` | CLI backend discovery |
|
|
28
|
+
| `storage` | `Storage` | Session persistence |
|
|
29
|
+
| `config` | `Config` | Optional runtime config |
|
|
30
|
+
|
|
31
|
+
### Lifecycle Methods
|
|
32
|
+
|
|
33
|
+
#### `orchestrator.spawn(agent)` → Promise\<void\>
|
|
34
|
+
|
|
35
|
+
Spawn and register an agent with rollback support. Each step (process start, map registration, activity tracking) is tracked and reversed on failure.
|
|
36
|
+
|
|
37
|
+
**Emits**: `'spawned'` with `{ name, role, provider }`
|
|
38
|
+
|
|
39
|
+
#### `orchestrator.shutdown(name, timeout?, reason?)` → Promise\<{acknowledged}\>
|
|
40
|
+
|
|
41
|
+
Graceful 3-phase shutdown: send request, poll for acknowledgment, force stop.
|
|
42
|
+
|
|
43
|
+
**Emits**: `'removed'` with `{ name, reason: 'shutdown' }`
|
|
44
|
+
|
|
45
|
+
#### `orchestrator.kill(name)` → Promise\<void\>
|
|
46
|
+
|
|
47
|
+
Immediate termination. Last resort for stuck agents.
|
|
48
|
+
|
|
49
|
+
**Emits**: `'removed'` with `{ name, reason: 'killed' }`
|
|
50
|
+
|
|
51
|
+
### Coordination Methods
|
|
52
|
+
|
|
53
|
+
#### `orchestrator.enlist(provider, role, opts?)` → Promise\<object\>
|
|
54
|
+
|
|
55
|
+
Create and spawn a new agent via the Factory.
|
|
56
|
+
|
|
57
|
+
#### `orchestrator.activity(name)` → void
|
|
58
|
+
|
|
59
|
+
Record a heartbeat timestamp for an agent. Resets the stall timer. Called by workflow packages when they dispatch work to an agent.
|
|
60
|
+
|
|
61
|
+
#### `orchestrator.refresh(name)` → Promise\<boolean\>
|
|
62
|
+
|
|
63
|
+
Check whether an agent's tmux pane has new output since the last check. When output changes, the agent is provably active — `_lastActivity` is updated and `true` is returned. This is the ground truth signal that feeds into `health()` and, transitively, the proof-of-life responder.
|
|
64
|
+
|
|
65
|
+
Agent-side MCP tool calls (file reads, code analysis, `pr review`) don't update `_lastActivity` because the agent's MCP server runs in a separate process. But every agent action produces terminal output, which `refresh()` detects by comparing MD5 hashes of the last 50 lines of captured pane output.
|
|
66
|
+
|
|
67
|
+
| Parameter | Type | Description |
|
|
68
|
+
|-----------|------|-------------|
|
|
69
|
+
| `name` | `string` | Agent identity name |
|
|
70
|
+
|
|
71
|
+
**Returns**: `true` when pane output changed (agent is active), `false` otherwise.
|
|
72
|
+
|
|
73
|
+
#### `orchestrator.health(name)` → Promise\<{alive, status, details, outputLength?}\>
|
|
74
|
+
|
|
75
|
+
Multi-signal health assessment for a named agent. Evaluates tmux/process liveness, agent state machine, tmux pane activity (via `refresh()`), and activity recency.
|
|
76
|
+
|
|
77
|
+
| Signal | Check | Unhealthy When |
|
|
78
|
+
|--------|-------|----------------|
|
|
79
|
+
| Process | `agent.alive()` — is the tmux pane alive? | Pane is dead |
|
|
80
|
+
| State | `agent.state` — is the agent dormant? | State is `dormant` |
|
|
81
|
+
| Pane activity | `refresh()` — has tmux output changed? | (Updates `_lastActivity` if changed) |
|
|
82
|
+
| Activity | `_lastActivity` timestamp vs stall timeout | Last activity exceeds stall timeout |
|
|
83
|
+
| Output | `agent.capture()` — terminal output length | Included for diagnostics (not a failure signal) |
|
|
84
|
+
|
|
85
|
+
**Returns**: `{ alive: boolean, status: 'healthy' | 'unhealthy' | 'not-found', details: string, outputLength?: number }`
|
|
86
|
+
|
|
87
|
+
Used by the proof-of-life reactor responder to answer liveness queries. The `refresh()` call inside `health()` ensures that agents actively working (producing terminal output) are reported as healthy even when `_lastActivity` is stale — this is what makes PoL verdicts accurate for local agents.
|
|
88
|
+
|
|
89
|
+
#### `orchestrator._removed` → Set\<string\>
|
|
90
|
+
|
|
91
|
+
Tracks agents locally killed or shut down during this orchestrator's lifetime. Populated by `kill()` and `shutdown()`. Used by workflow eviction logic to distinguish between genuinely foreign agents (from other orchestrators) and agents this orchestrator already cleaned up — preventing unnecessary proof-of-life network calls.
|
|
92
|
+
|
|
93
|
+
### Scaling
|
|
94
|
+
|
|
95
|
+
#### `orchestrator.scale(repo)` → Promise\<Array\<{role, agent}\>\>
|
|
96
|
+
|
|
97
|
+
Demand-driven scaling: collect demand signals from all registered workflows, then spawn agents to fill deficits — respecting global caps, per-role caps, rate limits, and cooldowns.
|
|
98
|
+
|
|
99
|
+
Runs after all workflow handlers in the reactor chain so demand signals reflect the latest hydrated state. Spawns are capped by `maxPerTick` to avoid resource spikes.
|
|
100
|
+
|
|
101
|
+
**Priority order**: reviewer > risk > action > planner. Reviewers unblock merges, risk unblocks reviewers, so they are filled first when at the global cap.
|
|
102
|
+
|
|
103
|
+
**Cross-provider preference with topology fallback**: Reviewer and risk agents are spawned on the opposite side (yin/yang) of the action agents they will pair with when dual-side capability exists. In single-side environments, `scale()` falls back to same-side spawning so review/risk coverage remains functional.
|
|
104
|
+
|
|
105
|
+
**Provider alternation**: When spawning multiple agents for a role, providers are alternated (yin/yang) to maintain adversarial diversity.
|
|
106
|
+
|
|
107
|
+
All backends (Claude, Cursor, Codex) are interactive and equally suitable for any role, so alternation never trades capability for adversarial diversity.
|
|
108
|
+
|
|
109
|
+
```js
|
|
110
|
+
const spawned = await orchestrator.scale('owner/repo');
|
|
111
|
+
// [{ role: 'reviewer', agent: 'jazz-0' }, { role: 'action', agent: 'optimus-1' }]
|
|
112
|
+
```
|
|
113
|
+
|
|
114
|
+
#### `orchestrator.workflows` → Map\<string, Workflow\>
|
|
115
|
+
|
|
116
|
+
Map of role name to workflow instance. Populated during start. Used by `scale()` to collect demand signals.
|
|
117
|
+
|
|
118
|
+
### Reactor (Polling Loop)
|
|
119
|
+
|
|
120
|
+
#### `orchestrator.register(name, handler)` → void
|
|
121
|
+
|
|
122
|
+
Register a reactor handler called on every tick.
|
|
123
|
+
|
|
124
|
+
#### `orchestrator.tick(repo)` → Promise\<void\>
|
|
125
|
+
|
|
126
|
+
Execute one polling iteration: call all registered handlers.
|
|
127
|
+
|
|
128
|
+
#### `orchestrator.watch(repo)` → void
|
|
129
|
+
|
|
130
|
+
Start the polling reactor loop on a configurable interval.
|
|
131
|
+
|
|
132
|
+
#### `orchestrator.unwatch()` → void
|
|
133
|
+
|
|
134
|
+
Stop the polling reactor loop.
|
|
135
|
+
|
|
136
|
+
### Monitor (Stall Detection)
|
|
137
|
+
|
|
138
|
+
#### `orchestrator.monitor()` → void
|
|
139
|
+
|
|
140
|
+
Start stall detection with 3-tier escalation:
|
|
141
|
+
- **Tier 1** (1x timeout): Nudge agent with a status request
|
|
142
|
+
- **Tier 2** (2x timeout): Emit `'stall'` event with severity `'warning'`
|
|
143
|
+
- **Tier 3** (3x timeout): Kill agent and emit `'stall'` with `'critical'`
|
|
144
|
+
|
|
145
|
+
**Emits**: `'stall'` with `{ name, elapsed, severity }`
|
|
146
|
+
|
|
147
|
+
This serves as the safety net for agents that become stuck despite the autonomous-mode directives in their prompt templates. All agent prompts (action, planner, reviewer) include explicit instructions prohibiting interactive behavior — but if an agent ignores those directives and blocks on user input, stall detection will eventually kill it and release the claimed work for re-dispatch.
|
|
148
|
+
|
|
149
|
+
#### `orchestrator.stopMonitor()` → void
|
|
150
|
+
|
|
151
|
+
Stop the stall detection interval.
|
|
152
|
+
|
|
153
|
+
### Death Snapshots
|
|
154
|
+
|
|
155
|
+
#### `orchestrator.snapshot(name, agent)` → Promise\<void\>
|
|
156
|
+
|
|
157
|
+
Capture a dying agent's terminal output and persist it to `~/.loreli/sessions/<sessionId>/logs/<name>.death.log`. Called automatically before `agent.stop()` in all exit paths (`reconcile`, `kill`, `shutdown`) so diagnostic output is never lost.
|
|
158
|
+
|
|
159
|
+
Requires `remain-on-exit` on the agent's tmux pane — interactive agents (`CliAgent.spawn()`) set this automatically so the pane survives after the process exits, preserving output for capture.
|
|
160
|
+
|
|
161
|
+
Non-fatal: silently skips when session or storage is unavailable, or when pane capture fails. Death snapshots are subject to the same session cleanup lifecycle as other session data — `Storage.prune()` removes them when the session expires.
|
|
162
|
+
|
|
163
|
+
```
|
|
164
|
+
~/.loreli/sessions/<sessionId>/logs/
|
|
165
|
+
optimus-0.death.log # captured terminal output at time of death
|
|
166
|
+
megatron-0.death.log
|
|
167
|
+
```
|
|
168
|
+
|
|
169
|
+
### Events
|
|
170
|
+
|
|
171
|
+
| Event | Payload | When |
|
|
172
|
+
|-------|---------|------|
|
|
173
|
+
| `spawned` | `{ name, role, provider }` | Agent successfully spawned |
|
|
174
|
+
| `removed` | `{ name, reason }` | Agent shut down or killed |
|
|
175
|
+
| `stall` | `{ name, elapsed, severity }` | Stall threshold exceeded |
|
|
176
|
+
| `rapid-death` | `{ name, backend, reason? }` | Agent died within seconds of spawn |
|
|
177
|
+
|
|
178
|
+
Role packages subscribe to these events via their `events()` method on the Workflow base class. The orchestrator itself never posts GitHub comments or claims issues — that's the role packages' responsibility.
|
|
179
|
+
|
|
180
|
+
## Errors
|
|
181
|
+
|
|
182
|
+
| Error | When | Resolution |
|
|
183
|
+
|-------|------|------------|
|
|
184
|
+
| `Agent "{name}" not found` | shutdown/kill called with unknown name | Check agent exists via `agents` Map |
|
|
185
|
+
| `Spawn failed` | Agent process could not start | Check backend CLI availability |
|
|
186
|
+
| `No backends available` | Factory has no discovered backends | Run `backendRegistry.discover()` |
|
|
187
|
+
|
|
188
|
+
## Scope Boundary
|
|
189
|
+
|
|
190
|
+
**In scope**: Agent lifecycle (spawn, shutdown, kill), reactor polling, stall detection, activity tracking, event emission, demand-driven scaling, agent health assessment.
|
|
191
|
+
|
|
192
|
+
**Out of scope**: Role-specific logic (claiming, reviewing, planning), GitHub comments, prompt rendering, workspace preparation.
|