@calltelemetry/openclaw-linear 0.3.1 → 0.4.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +322 -386
- package/index.ts +54 -2
- package/openclaw.plugin.json +9 -1
- package/package.json +3 -2
- package/src/active-session.ts +106 -0
- package/src/agent.ts +173 -1
- package/src/auth.ts +6 -2
- package/src/claude-tool.ts +280 -0
- package/src/cli-shared.ts +75 -0
- package/src/cli.ts +39 -0
- package/src/client.ts +1 -0
- package/src/code-tool.ts +202 -0
- package/src/codex-tool.ts +240 -0
- package/src/codex-worktree.ts +390 -0
- package/src/dispatch-service.ts +113 -0
- package/src/dispatch-state.ts +265 -0
- package/src/gemini-tool.ts +238 -0
- package/src/orchestration-tools.ts +134 -0
- package/src/pipeline.ts +343 -56
- package/src/tier-assess.ts +157 -0
- package/src/tools.ts +29 -79
- package/src/webhook.ts +532 -275
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
import type { AnyAgentTool, OpenClawPluginApi } from "openclaw/plugin-sdk";
|
|
2
|
+
import { jsonResult } from "openclaw/plugin-sdk";
|
|
3
|
+
import { runAgent } from "./agent.js";
|
|
4
|
+
|
|
5
|
+
/**
 * Create orchestration tools that let agents delegate work to other crew agents.
 *
 * - spawn_agent: Fire-and-forget parallel delegation (non-blocking)
 * - ask_agent: Synchronous question-answer with another agent
 *
 * @param api  Plugin API handle; used for logging and passed through to runAgent.
 * @param _ctx Reserved context bag — currently unused.
 * @returns The two orchestration tools, ready for tool registration.
 */
export function createOrchestrationTools(
  api: OpenClawPluginApi,
  _ctx: Record<string, unknown>,
): AnyAgentTool[] {
  return [
    {
      name: "spawn_agent",
      label: "Spawn Agent",
      description:
        "Delegate a task to another crew agent. Runs in the background — does not block. " +
        "Use this when you want to parallelize work (e.g., ask kaylee to investigate DB performance " +
        "while you continue working on something else).",
      parameters: {
        type: "object",
        properties: {
          agentId: {
            type: "string",
            description:
              "Which agent to dispatch (e.g., 'kaylee', 'inara', 'mal'). Must match an agent ID in openclaw.json.",
          },
          task: {
            type: "string",
            description: "Description of what the sub-agent should do.",
          },
          timeoutSeconds: {
            type: "number",
            description: "Max runtime in seconds (default: 300).",
          },
        },
        required: ["agentId", "task"],
      },
      execute: async (_toolCallId: string, { agentId, task, timeoutSeconds }: {
        agentId: string;
        task: string;
        timeoutSeconds?: number;
      }) => {
        // Default 300 s; caller passes seconds, runAgent wants milliseconds.
        const timeout = (timeoutSeconds ?? 300) * 1000;
        // Timestamp suffix keeps repeated spawns to the same agent distinct.
        const sessionId = `spawn-${agentId}-${Date.now()}`;

        api.logger.info(`spawn_agent: dispatching ${agentId} — "${task.slice(0, 80)}..."`);

        // Fire and forget — don't await the full result
        const resultPromise = runAgent({
          api,
          agentId,
          sessionId,
          message: task,
          timeoutMs: timeout,
        });

        // Attach a rejection handler so a background failure is logged instead of
        // surfacing as an unhandled rejection. NOTE(review): the promise itself is
        // NOT stored anywhere — the sub-agent's result cannot be retrieved later.
        resultPromise.catch((err) => {
          api.logger.error(`spawn_agent ${agentId} failed: ${err}`);
        });

        // Return immediately; the sub-agent keeps running in the background.
        return jsonResult({
          message: `Dispatched task to agent '${agentId}'. It is running in the background.`,
          agentId,
          sessionId,
        });
      },
    } as unknown as AnyAgentTool,

    {
      name: "ask_agent",
      label: "Ask Agent",
      description:
        "Ask another crew agent a question and wait for their reply. " +
        "Use this when you need a specific answer before proceeding " +
        "(e.g., 'wash, would this schema change break existing tests?').",
      parameters: {
        type: "object",
        properties: {
          agentId: {
            type: "string",
            description:
              "Which agent to ask (e.g., 'kaylee', 'inara', 'mal'). Must match an agent ID in openclaw.json.",
          },
          message: {
            type: "string",
            description: "The question or request for the other agent.",
          },
          timeoutSeconds: {
            type: "number",
            description: "How long to wait for a reply in seconds (default: 120).",
          },
        },
        required: ["agentId", "message"],
      },
      execute: async (_toolCallId: string, { agentId, message, timeoutSeconds }: {
        agentId: string;
        message: string;
        timeoutSeconds?: number;
      }) => {
        // Shorter default than spawn_agent (120 s) since the caller blocks on this.
        const timeout = (timeoutSeconds ?? 120) * 1000;
        const sessionId = `ask-${agentId}-${Date.now()}`;

        api.logger.info(`ask_agent: asking ${agentId} — "${message.slice(0, 80)}..."`);

        // Synchronous delegation: await the full sub-agent run.
        const result = await runAgent({
          api,
          agentId,
          sessionId,
          message,
          timeoutMs: timeout,
        });

        if (!result.success) {
          // Truncate error output so the tool result stays compact.
          return jsonResult({
            message: `Agent '${agentId}' failed to respond.`,
            error: result.output.slice(0, 1000),
            agentId,
          });
        }

        return jsonResult({
          message: `Response from agent '${agentId}':`,
          agentId,
          response: result.output,
        });
      },
    } as unknown as AnyAgentTool,
  ];
}
|
package/src/pipeline.ts
CHANGED
|
@@ -1,6 +1,13 @@
|
|
|
1
1
|
import type { OpenClawPluginApi } from "openclaw/plugin-sdk";
|
|
2
2
|
import type { LinearAgentApi, ActivityContent } from "./linear-api.js";
|
|
3
3
|
import { runAgent } from "./agent.js";
|
|
4
|
+
import { setActiveSession, clearActiveSession } from "./active-session.js";
|
|
5
|
+
import type { Tier } from "./dispatch-state.js";
|
|
6
|
+
import { runCodex } from "./codex-tool.js";
|
|
7
|
+
import { runClaude } from "./claude-tool.js";
|
|
8
|
+
import { runGemini } from "./gemini-tool.js";
|
|
9
|
+
import { resolveCodingBackend, loadCodingConfig, type CodingBackend } from "./code-tool.js";
|
|
10
|
+
import type { CliResult } from "./cli-shared.js";
|
|
4
11
|
|
|
5
12
|
export interface PipelineContext {
|
|
6
13
|
api: OpenClawPluginApi;
|
|
@@ -14,18 +21,78 @@ export interface PipelineContext {
|
|
|
14
21
|
description?: string | null;
|
|
15
22
|
};
|
|
16
23
|
promptContext?: unknown;
|
|
24
|
+
/** Populated by implementor stage if Codex creates a worktree */
|
|
25
|
+
worktreePath?: string | null;
|
|
26
|
+
/** Codex branch name, e.g. codex/UAT-123 */
|
|
27
|
+
codexBranch?: string | null;
|
|
28
|
+
/** Complexity tier selected by tier assessment */
|
|
29
|
+
tier?: Tier;
|
|
30
|
+
/** Tier model ID — for display/tracking only, NOT passed to coding CLI */
|
|
31
|
+
model?: string;
|
|
17
32
|
}
|
|
18
33
|
|
|
34
|
+
// ---------------------------------------------------------------------------
|
|
35
|
+
// Helpers
|
|
36
|
+
// ---------------------------------------------------------------------------
|
|
37
|
+
|
|
19
38
|
function emit(ctx: PipelineContext, content: ActivityContent): Promise<void> {
|
|
20
39
|
return ctx.linearApi.emitActivity(ctx.agentSessionId, content).catch((err) => {
|
|
21
|
-
ctx.api.logger.error(`
|
|
40
|
+
ctx.api.logger.error(`[${ctx.issue.identifier}] emit failed: ${err}`);
|
|
22
41
|
});
|
|
23
42
|
}
|
|
24
43
|
|
|
25
|
-
|
|
44
|
+
/** Resolve the agent's model string from config for logging/display. */
|
|
45
|
+
function resolveAgentModel(api: OpenClawPluginApi, agentId: string): string {
|
|
46
|
+
try {
|
|
47
|
+
const config = (api as any).runtime?.config?.getCachedConfig?.() ?? {};
|
|
48
|
+
const agents = config?.agents?.list as Array<Record<string, any>> | undefined;
|
|
49
|
+
const entry = agents?.find((a) => a.id === agentId);
|
|
50
|
+
const modelRef: string =
|
|
51
|
+
entry?.model?.primary ??
|
|
52
|
+
config?.agents?.defaults?.model?.primary ??
|
|
53
|
+
"unknown";
|
|
54
|
+
// Strip provider prefix for display: "openrouter/moonshotai/kimi-k2.5" → "kimi-k2.5"
|
|
55
|
+
const parts = modelRef.split("/");
|
|
56
|
+
return parts.length > 1 ? parts.slice(1).join("/") : modelRef;
|
|
57
|
+
} catch {
|
|
58
|
+
return "unknown";
|
|
59
|
+
}
|
|
60
|
+
}
|
|
61
|
+
|
|
62
|
+
function elapsed(startMs: number): string {
|
|
63
|
+
const sec = ((Date.now() - startMs) / 1000).toFixed(1);
|
|
64
|
+
return `${sec}s`;
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
function toolContext(ctx: PipelineContext): string {
|
|
68
|
+
const lines = [
|
|
69
|
+
`\n## code_run Tool`,
|
|
70
|
+
`When calling \`code_run\`, pass these parameters:`,
|
|
71
|
+
];
|
|
72
|
+
lines.push(`- \`prompt\`: describe what to implement (be specific — file paths, function names, expected behavior)`);
|
|
73
|
+
if (ctx.worktreePath) {
|
|
74
|
+
lines.push(`- \`workingDir\`: \`"${ctx.worktreePath}"\``);
|
|
75
|
+
}
|
|
76
|
+
// Don't suggest model override — each coding CLI uses its own configured model
|
|
77
|
+
lines.push(`Progress streams to Linear automatically. The worktree is an isolated git branch for this issue.`);
|
|
78
|
+
return lines.join("\n");
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
const TAG = (ctx: PipelineContext) => `Pipeline [${ctx.issue.identifier}]`;
|
|
82
|
+
|
|
83
|
+
// ---------------------------------------------------------------------------
|
|
84
|
+
// Stage 1: Planner
|
|
85
|
+
// ---------------------------------------------------------------------------
|
|
26
86
|
|
|
27
87
|
export async function runPlannerStage(ctx: PipelineContext): Promise<string | null> {
|
|
28
|
-
|
|
88
|
+
const t0 = Date.now();
|
|
89
|
+
const agentModel = resolveAgentModel(ctx.api, ctx.agentId);
|
|
90
|
+
|
|
91
|
+
ctx.api.logger.info(`${TAG(ctx)} stage 1/3: planner starting (agent=${ctx.agentId}, model=${agentModel})`);
|
|
92
|
+
await emit(ctx, {
|
|
93
|
+
type: "thought",
|
|
94
|
+
body: `[1/3 Plan] Analyzing ${ctx.issue.identifier} with ${ctx.agentId} (${agentModel})...`,
|
|
95
|
+
});
|
|
29
96
|
|
|
30
97
|
const issueDetails = await ctx.linearApi.getIssueDetails(ctx.issue.id).catch(() => null);
|
|
31
98
|
|
|
@@ -54,90 +121,244 @@ ${ctx.promptContext ? `**Additional context:**\n${JSON.stringify(ctx.promptConte
|
|
|
54
121
|
4. Note any risks or dependencies
|
|
55
122
|
5. Output your plan in markdown format
|
|
56
123
|
|
|
124
|
+
IMPORTANT: Do NOT call code_run or any coding tools. Your job is ONLY to analyze and write a plan. The implementor stage will execute the plan using code_run after you're done.
|
|
125
|
+
|
|
57
126
|
Output ONLY the plan, nothing else.`;
|
|
58
127
|
|
|
59
|
-
await emit(ctx, {
|
|
128
|
+
await emit(ctx, {
|
|
129
|
+
type: "action",
|
|
130
|
+
action: "Planning",
|
|
131
|
+
parameter: `${ctx.issue.identifier} — agent: ${ctx.agentId} (${agentModel})`,
|
|
132
|
+
});
|
|
133
|
+
|
|
134
|
+
const sessionId = `linear-plan-${ctx.agentSessionId}`;
|
|
135
|
+
ctx.api.logger.info(`${TAG(ctx)} planner: spawning agent session=${sessionId}`);
|
|
60
136
|
|
|
61
137
|
const result = await runAgent({
|
|
62
138
|
api: ctx.api,
|
|
63
139
|
agentId: ctx.agentId,
|
|
64
|
-
sessionId
|
|
140
|
+
sessionId,
|
|
65
141
|
message,
|
|
66
142
|
timeoutMs: 5 * 60_000,
|
|
67
143
|
});
|
|
68
144
|
|
|
69
145
|
if (!result.success) {
|
|
70
|
-
|
|
146
|
+
ctx.api.logger.error(`${TAG(ctx)} planner failed after ${elapsed(t0)}: ${result.output.slice(0, 300)}`);
|
|
147
|
+
await emit(ctx, {
|
|
148
|
+
type: "error",
|
|
149
|
+
body: `[1/3 Plan] Failed after ${elapsed(t0)}: ${result.output.slice(0, 400)}`,
|
|
150
|
+
});
|
|
71
151
|
return null;
|
|
72
152
|
}
|
|
73
153
|
|
|
74
154
|
const plan = result.output;
|
|
155
|
+
ctx.api.logger.info(`${TAG(ctx)} planner completed in ${elapsed(t0)} (${plan.length} chars)`);
|
|
75
156
|
|
|
76
157
|
// Post plan as a Linear comment
|
|
77
158
|
await ctx.linearApi.createComment(
|
|
78
159
|
ctx.issue.id,
|
|
79
|
-
`## Implementation Plan\n\n${plan}\n\n---\n*
|
|
160
|
+
`## Implementation Plan\n\n${plan}\n\n---\n*Proceeding to implementation...*`,
|
|
80
161
|
);
|
|
81
162
|
|
|
82
163
|
await emit(ctx, {
|
|
83
|
-
type: "
|
|
84
|
-
|
|
164
|
+
type: "action",
|
|
165
|
+
action: "Plan complete",
|
|
166
|
+
parameter: `${ctx.issue.identifier} — ${elapsed(t0)}, moving to implementation`,
|
|
85
167
|
});
|
|
86
168
|
|
|
87
169
|
return plan;
|
|
88
170
|
}
|
|
89
171
|
|
|
90
|
-
//
|
|
172
|
+
// ---------------------------------------------------------------------------
|
|
173
|
+
// Stage 2: Implementor
|
|
174
|
+
// ---------------------------------------------------------------------------
|
|
175
|
+
//
|
|
176
|
+
// Deterministic: pipeline CODE calls the coding CLI directly.
|
|
177
|
+
// The agent model only evaluates results between runs.
|
|
178
|
+
|
|
179
|
+
// Maps each configured coding backend to its CLI runner function. Dispatch is
// deterministic — the pipeline code (not the LLM) selects the backend.
// NOTE(review): `params: any` loosens typing; each runner is called with
// { prompt, workingDir?, timeoutMs } (see runImplementorStage) — consider a
// shared params type in cli-shared.ts.
const BACKEND_RUNNERS: Record<
  CodingBackend,
  (api: OpenClawPluginApi, params: any, pluginConfig?: Record<string, unknown>) => Promise<CliResult>
> = {
  codex: runCodex,
  claude: runClaude,
  gemini: runGemini,
};
|
|
91
187
|
|
|
92
188
|
/**
 * Stage 2 of 3: run the configured coding CLI against the plan, then have the
 * crew agent evaluate the CLI's output.
 *
 * The coding CLI is invoked directly and deterministically (no LLM tool
 * choice); the agent model is only used to evaluate success or analyze a
 * failure afterwards.
 *
 * @param ctx  Pipeline context (issue, Linear session, optional worktree/tier).
 * @param plan Markdown implementation plan produced by the planner stage.
 * @returns The agent's evaluation summary on success, or null when the coding
 *          CLI fails (a failure-analysis comment is posted to Linear first).
 */
export async function runImplementorStage(
  ctx: PipelineContext,
  plan: string,
): Promise<string | null> {
  const t0 = Date.now();
  const agentModel = resolveAgentModel(ctx.api, ctx.agentId);
  // NOTE(review): pluginConfig read reflectively — assumes the host attaches it
  // to the api object; confirm against the plugin SDK.
  const pluginConfig = (ctx.api as any).pluginConfig as Record<string, unknown> | undefined;

  // Resolve coding backend from config (coding-tools.json)
  const codingConfig = loadCodingConfig();
  const backend = resolveCodingBackend(codingConfig);
  const runner = BACKEND_RUNNERS[backend];
  // Capitalized backend name used in all user-facing messages, e.g. "Codex".
  const backendName = backend.charAt(0).toUpperCase() + backend.slice(1);

  ctx.api.logger.info(
    `${TAG(ctx)} stage 2/3: implementor starting ` +
      `(coding_cli=${backendName}, tier=${ctx.tier ?? "unknown"}, ` +
      `worktree=${ctx.worktreePath ?? "default"}, ` +
      `eval_agent=${ctx.agentId}, eval_model=${agentModel})`,
  );

  await emit(ctx, {
    type: "thought",
    body: `[2/3 Implement] Starting ${backendName} CLI → ${ctx.worktreePath ?? "default workspace"}`,
  });

  // Build the implementation prompt for the coding CLI
  const codePrompt = [
    `Implement the following plan for issue ${ctx.issue.identifier} — ${ctx.issue.title}.`,
    ``,
    `## Plan`,
    plan,
    ``,
    `## Instructions`,
    `- Follow the plan step by step`,
    `- Create commits for each logical change`,
    `- Run tests if the project has them`,
    `- Stay within scope of the plan`,
  ].join("\n");

  await emit(ctx, {
    type: "action",
    action: `Running ${backendName}`,
    parameter: `${ctx.tier ?? "unknown"} tier — worktree: ${ctx.worktreePath ?? "default"}`,
  });

  // Call the coding CLI directly — deterministic, not LLM choice.
  // NOTE: Do NOT pass ctx.model here. The tier model (e.g. anthropic/claude-sonnet-4-6)
  // is for tracking/display only. Each coding CLI uses its own configured model.
  ctx.api.logger.info(`${TAG(ctx)} implementor: invoking ${backendName} CLI (no model override — CLI uses its own config)`);
  const cliStart = Date.now();

  const codeResult = await runner(ctx.api, {
    prompt: codePrompt,
    workingDir: ctx.worktreePath ?? undefined,
    timeoutMs: 10 * 60_000,
  }, pluginConfig);

  const cliElapsed = elapsed(cliStart);

  if (!codeResult.success) {
    ctx.api.logger.warn(
      `${TAG(ctx)} implementor: ${backendName} CLI failed after ${cliElapsed} — ` +
        `error: ${codeResult.error ?? "unknown"}, output: ${codeResult.output.slice(0, 300)}`,
    );
    await emit(ctx, {
      type: "error",
      body: `[2/3 Implement] ${backendName} failed after ${cliElapsed}: ${(codeResult.error ?? codeResult.output).slice(0, 400)}`,
    });

    // Ask the agent to evaluate the failure
    ctx.api.logger.info(`${TAG(ctx)} implementor: spawning ${ctx.agentId} (${agentModel}) to evaluate failure`);
    await emit(ctx, {
      type: "action",
      action: "Evaluating failure",
      parameter: `${ctx.agentId} (${agentModel}) analyzing ${backendName} error`,
    });

    const evalResult = await runAgent({
      api: ctx.api,
      agentId: ctx.agentId,
      sessionId: `linear-impl-eval-${ctx.agentSessionId}`,
      message: `${backendName} failed to implement the plan for ${ctx.issue.identifier}.\n\n## Plan\n${plan}\n\n## ${backendName} Output\n${codeResult.output.slice(0, 3000)}\n\n## Error\n${codeResult.error ?? "unknown"}\n\nAnalyze the failure. Summarize what went wrong and suggest next steps. Be concise.`,
      timeoutMs: 2 * 60_000,
    });

    // Fall back to raw CLI output if even the failure analysis failed.
    const failureSummary = evalResult.success
      ? evalResult.output
      : `Implementation failed and evaluation also failed: ${codeResult.output.slice(0, 500)}`;

    await ctx.linearApi.createComment(
      ctx.issue.id,
      `## Implementation Failed\n\n**Backend:** ${backendName} (ran for ${cliElapsed})\n**Tier:** ${ctx.tier ?? "unknown"}\n\n${failureSummary}`,
    );

    // null signals the pipeline to abort before the audit stage.
    return null;
  }

  ctx.api.logger.info(`${TAG(ctx)} implementor: ${backendName} CLI completed in ${cliElapsed} (${codeResult.output.length} chars output)`);

  // Ask the agent to evaluate the result
  const evalMessage = [
    `${backendName} completed implementation for ${ctx.issue.identifier}. Evaluate the result.`,
    ``,
    `## Original Plan`,
    plan,
    ``,
    `## ${backendName} Output`,
    codeResult.output.slice(0, 5000),
    ``,
    `## Worktree`,
    `Path: ${ctx.worktreePath ?? "default"}`,
    `Branch: ${ctx.codexBranch ?? "unknown"}`,
    ``,
    `Summarize what was implemented, any issues found, and whether the plan was fully executed. Be concise.`,
  ].join("\n");

  ctx.api.logger.info(`${TAG(ctx)} implementor: spawning ${ctx.agentId} (${agentModel}) to evaluate results`);
  await emit(ctx, {
    type: "action",
    action: "Evaluating results",
    parameter: `${ctx.agentId} (${agentModel}) reviewing ${backendName} output`,
  });

  const evalStart = Date.now();
  const evalResult = await runAgent({
    api: ctx.api,
    agentId: ctx.agentId,
    sessionId: `linear-impl-eval-${ctx.agentSessionId}`,
    message: evalMessage,
    timeoutMs: 3 * 60_000,
  });

  // An eval failure is non-fatal: the implementation still happened, so
  // return the truncated CLI output as the summary instead.
  const summary = evalResult.success
    ? evalResult.output
    : `Implementation completed but evaluation failed. ${backendName} output:\n${codeResult.output.slice(0, 2000)}`;

  ctx.api.logger.info(
    `${TAG(ctx)} implementor: evaluation ${evalResult.success ? "succeeded" : "failed"} in ${elapsed(evalStart)}, ` +
      `total stage time: ${elapsed(t0)}`,
  );

  await emit(ctx, {
    type: "action",
    action: "Implementation complete",
    parameter: `${backendName} ${cliElapsed} + eval ${elapsed(evalStart)} = ${elapsed(t0)} total`,
  });

  return summary;
}
|
|
132
338
|
|
|
133
|
-
//
|
|
339
|
+
// ---------------------------------------------------------------------------
|
|
340
|
+
// Stage 3: Auditor
|
|
341
|
+
// ---------------------------------------------------------------------------
|
|
134
342
|
|
|
135
343
|
export async function runAuditorStage(
|
|
136
344
|
ctx: PipelineContext,
|
|
137
345
|
plan: string,
|
|
138
346
|
implResult: string,
|
|
139
347
|
): Promise<void> {
|
|
140
|
-
|
|
348
|
+
const t0 = Date.now();
|
|
349
|
+
const agentModel = resolveAgentModel(ctx.api, ctx.agentId);
|
|
350
|
+
|
|
351
|
+
ctx.api.logger.info(
|
|
352
|
+
`${TAG(ctx)} stage 3/3: auditor starting (agent=${ctx.agentId}, model=${agentModel})`,
|
|
353
|
+
);
|
|
354
|
+
await emit(ctx, {
|
|
355
|
+
type: "thought",
|
|
356
|
+
body: `[3/3 Audit] Reviewing implementation with ${ctx.agentId} (${agentModel})...`,
|
|
357
|
+
});
|
|
358
|
+
|
|
359
|
+
const worktreeInfo = ctx.worktreePath
|
|
360
|
+
? `\n## Worktree\nCode changes are at: \`${ctx.worktreePath}\` (branch: \`${ctx.codexBranch ?? "unknown"}\`)\n`
|
|
361
|
+
: "";
|
|
141
362
|
|
|
142
363
|
const message = `You are an auditor. Review this implementation against the original plan.
|
|
143
364
|
|
|
@@ -148,27 +369,41 @@ ${plan}
|
|
|
148
369
|
|
|
149
370
|
## Implementation Result:
|
|
150
371
|
${implResult}
|
|
151
|
-
|
|
372
|
+
${worktreeInfo}
|
|
152
373
|
## Instructions
|
|
153
374
|
1. Verify each plan step was completed
|
|
154
|
-
2. Check for any missed items
|
|
375
|
+
2. Check for any missed items — use \`ask_agent\` / \`spawn_agent\` for specialized review if needed
|
|
155
376
|
3. Note any concerns or improvements needed
|
|
156
377
|
4. Provide a pass/fail verdict with reasoning
|
|
157
378
|
5. Output a concise audit summary in markdown
|
|
379
|
+
${toolContext(ctx)}
|
|
158
380
|
|
|
159
381
|
Output ONLY the audit summary.`;
|
|
160
382
|
|
|
161
|
-
|
|
383
|
+
const sessionId = `linear-audit-${ctx.agentSessionId}`;
|
|
384
|
+
ctx.api.logger.info(`${TAG(ctx)} auditor: spawning agent session=${sessionId}`);
|
|
385
|
+
|
|
386
|
+
await emit(ctx, {
|
|
387
|
+
type: "action",
|
|
388
|
+
action: "Auditing",
|
|
389
|
+
parameter: `${ctx.issue.identifier} — agent: ${ctx.agentId} (${agentModel})`,
|
|
390
|
+
});
|
|
162
391
|
|
|
163
392
|
const result = await runAgent({
|
|
164
393
|
api: ctx.api,
|
|
165
394
|
agentId: ctx.agentId,
|
|
166
|
-
sessionId
|
|
395
|
+
sessionId,
|
|
167
396
|
message,
|
|
168
397
|
timeoutMs: 5 * 60_000,
|
|
169
398
|
});
|
|
170
399
|
|
|
171
|
-
const auditSummary = result.success
|
|
400
|
+
const auditSummary = result.success
|
|
401
|
+
? result.output
|
|
402
|
+
: `Audit failed: ${result.output.slice(0, 500)}`;
|
|
403
|
+
|
|
404
|
+
ctx.api.logger.info(
|
|
405
|
+
`${TAG(ctx)} auditor: ${result.success ? "completed" : "failed"} in ${elapsed(t0)} (${auditSummary.length} chars)`,
|
|
406
|
+
);
|
|
172
407
|
|
|
173
408
|
await ctx.linearApi.createComment(
|
|
174
409
|
ctx.issue.id,
|
|
@@ -177,36 +412,88 @@ Output ONLY the audit summary.`;
|
|
|
177
412
|
|
|
178
413
|
await emit(ctx, {
|
|
179
414
|
type: "response",
|
|
180
|
-
body: `
|
|
415
|
+
body: `[3/3 Audit] ${result.success ? "Complete" : "Failed"} (${elapsed(t0)}). ` +
|
|
416
|
+
`All stages done for ${ctx.issue.identifier}. Plan, implementation, and audit posted as comments.`,
|
|
181
417
|
});
|
|
182
418
|
}
|
|
183
419
|
|
|
184
|
-
//
|
|
420
|
+
// ---------------------------------------------------------------------------
|
|
421
|
+
// Full Pipeline
|
|
422
|
+
// ---------------------------------------------------------------------------
|
|
423
|
+
//
|
|
424
|
+
// Runs all three stages sequentially: plan → implement → audit.
|
|
425
|
+
// Assignment is the trigger AND the approval — no pause between stages.
|
|
426
|
+
// Each stage's result feeds into the next. If any stage fails, the
|
|
427
|
+
// pipeline stops and reports the error.
|
|
185
428
|
|
|
186
429
|
/**
 * Run the full three-stage pipeline for an issue: plan → implement → audit.
 *
 * Assignment is the trigger AND the approval — there is no pause between
 * stages; each stage's output feeds the next. If planning or implementation
 * fails, the pipeline stops and posts an error activity. The active session
 * registered at the start is always cleared in the finally block, even when
 * a stage throws.
 */
export async function runFullPipeline(ctx: PipelineContext): Promise<void> {
  const t0 = Date.now();
  const agentModel = resolveAgentModel(ctx.api, ctx.agentId);
  const codingConfig = loadCodingConfig();
  const codingBackend = resolveCodingBackend(codingConfig);

  ctx.api.logger.info(
    `${TAG(ctx)} === PIPELINE START === ` +
      `agent=${ctx.agentId}, agent_model=${agentModel}, ` +
      `coding_cli=${codingBackend}, tier=${ctx.tier ?? "unknown"}, ` +
      `worktree=${ctx.worktreePath ?? "none"}, ` +
      `branch=${ctx.codexBranch ?? "none"}, ` +
      `session=${ctx.agentSessionId}`,
  );

  // Register active session so tools (code_run) can resolve it
  setActiveSession({
    agentSessionId: ctx.agentSessionId,
    issueIdentifier: ctx.issue.identifier,
    issueId: ctx.issue.id,
    agentId: ctx.agentId,
    startedAt: Date.now(),
  });

  await emit(ctx, {
    type: "thought",
    body: `Pipeline started for ${ctx.issue.identifier} — ` +
      `agent: ${ctx.agentId} (${agentModel}), ` +
      `coding: ${codingBackend}, ` +
      `tier: ${ctx.tier ?? "unknown"}`,
  });

  try {
    // Stage 1: Plan
    const plan = await runPlannerStage(ctx);
    if (!plan) {
      // Planner already reported its own error; this is the pipeline-level abort.
      ctx.api.logger.error(`${TAG(ctx)} planner produced no plan — aborting after ${elapsed(t0)}`);
      await emit(ctx, {
        type: "error",
        body: `Pipeline aborted — planning stage failed after ${elapsed(t0)}. No plan produced.`,
      });
      return;
    }

    // Stage 2: Implement
    const implResult = await runImplementorStage(ctx, plan);
    if (!implResult) {
      // Implementor posts its own failure-analysis comment before returning null.
      ctx.api.logger.error(`${TAG(ctx)} implementor failed — aborting after ${elapsed(t0)}`);
      await emit(ctx, {
        type: "error",
        body: `Pipeline aborted — implementation stage failed after ${elapsed(t0)}.`,
      });
      return;
    }

    // Stage 3: Audit
    await runAuditorStage(ctx, plan, implResult);

    ctx.api.logger.info(
      `${TAG(ctx)} === PIPELINE COMPLETE === total time: ${elapsed(t0)}`,
    );
  } catch (err) {
    // Unexpected crash in any stage: log and surface a truncated error activity.
    ctx.api.logger.error(`${TAG(ctx)} === PIPELINE ERROR === after ${elapsed(t0)}: ${err}`);
    await emit(ctx, {
      type: "error",
      body: `Pipeline crashed after ${elapsed(t0)}: ${String(err).slice(0, 400)}`,
    });
  } finally {
    // Always unregister, success or failure, so stale sessions don't accumulate.
    clearActiveSession(ctx.issue.id);
  }
}
|