@calltelemetry/openclaw-linear 0.4.0 → 0.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/pipeline.ts CHANGED
@@ -2,6 +2,12 @@ import type { OpenClawPluginApi } from "openclaw/plugin-sdk";
  import type { LinearAgentApi, ActivityContent } from "./linear-api.js";
  import { runAgent } from "./agent.js";
  import { setActiveSession, clearActiveSession } from "./active-session.js";
+ import type { Tier } from "./dispatch-state.js";
+ import { runCodex } from "./codex-tool.js";
+ import { runClaude } from "./claude-tool.js";
+ import { runGemini } from "./gemini-tool.js";
+ import { resolveCodingBackend, loadCodingConfig, type CodingBackend } from "./code-tool.js";
+ import type { CliResult } from "./cli-shared.js";

  export interface PipelineContext {
    api: OpenClawPluginApi;
@@ -19,28 +25,74 @@ export interface PipelineContext {
    worktreePath?: string | null;
    /** Codex branch name, e.g. codex/UAT-123 */
    codexBranch?: string | null;
+   /** Complexity tier selected by tier assessment */
+   tier?: Tier;
+   /** Tier model ID — for display/tracking only, NOT passed to coding CLI */
+   model?: string;
  }

+ // ---------------------------------------------------------------------------
+ // Helpers
+ // ---------------------------------------------------------------------------
+
  function emit(ctx: PipelineContext, content: ActivityContent): Promise<void> {
    return ctx.linearApi.emitActivity(ctx.agentSessionId, content).catch((err) => {
-     ctx.api.logger.error(`Failed to emit activity: ${err}`);
+     ctx.api.logger.error(`[${ctx.issue.identifier}] emit failed: ${err}`);
    });
  }

+ /** Resolve the agent's model string from config for logging/display. */
+ function resolveAgentModel(api: OpenClawPluginApi, agentId: string): string {
+   try {
+     const config = (api as any).runtime?.config?.getCachedConfig?.() ?? {};
+     const agents = config?.agents?.list as Array<Record<string, any>> | undefined;
+     const entry = agents?.find((a) => a.id === agentId);
+     const modelRef: string =
+       entry?.model?.primary ??
+       config?.agents?.defaults?.model?.primary ??
+       "unknown";
+     // Strip provider prefix for display: "openrouter/moonshotai/kimi-k2.5" → "kimi-k2.5"
+     const parts = modelRef.split("/");
+     return parts.length > 1 ? parts.slice(1).join("/") : modelRef;
+   } catch {
+     return "unknown";
+   }
+ }
+
+ function elapsed(startMs: number): string {
+   const sec = ((Date.now() - startMs) / 1000).toFixed(1);
+   return `${sec}s`;
+ }
+
  function toolContext(ctx: PipelineContext): string {
-   return [
-     `\n## Tool Context`,
-     `When calling \`code_run\`, you MUST pass these parameters:`,
-     `- \`agentSessionId\`: \`"${ctx.agentSessionId}"\``,
-     `- \`issueIdentifier\`: \`"${ctx.issue.identifier}"\``,
-     `This enables real-time progress streaming to Linear and isolated worktree creation.`,
-   ].join("\n");
+   const lines = [
+     `\n## code_run Tool`,
+     `When calling \`code_run\`, pass these parameters:`,
+   ];
+   lines.push(`- \`prompt\`: describe what to implement (be specific — file paths, function names, expected behavior)`);
+   if (ctx.worktreePath) {
+     lines.push(`- \`workingDir\`: \`"${ctx.worktreePath}"\``);
+   }
+   // Don't suggest model override — each coding CLI uses its own configured model
+   lines.push(`Progress streams to Linear automatically. The worktree is an isolated git branch for this issue.`);
+   return lines.join("\n");
  }

- // ── Stage 1: Planner ───────────────────────────────────────────────
+ const TAG = (ctx: PipelineContext) => `Pipeline [${ctx.issue.identifier}]`;
+
+ // ---------------------------------------------------------------------------
+ // Stage 1: Planner
+ // ---------------------------------------------------------------------------

  export async function runPlannerStage(ctx: PipelineContext): Promise<string | null> {
-   await emit(ctx, { type: "thought", body: `Analyzing issue ${ctx.issue.identifier}...` });
+   const t0 = Date.now();
+   const agentModel = resolveAgentModel(ctx.api, ctx.agentId);
+
+   ctx.api.logger.info(`${TAG(ctx)} stage 1/3: planner starting (agent=${ctx.agentId}, model=${agentModel})`);
+   await emit(ctx, {
+     type: "thought",
+     body: `[1/3 Plan] Analyzing ${ctx.issue.identifier} with ${ctx.agentId} (${agentModel})...`,
+   });

    const issueDetails = await ctx.linearApi.getIssueDetails(ctx.issue.id).catch(() => null);

@@ -73,98 +125,236 @@ IMPORTANT: Do NOT call code_run or any coding tools. Your job is ONLY to analyze

  Output ONLY the plan, nothing else.`;

-   await emit(ctx, { type: "action", action: "Planning", parameter: ctx.issue.identifier });
+   await emit(ctx, {
+     type: "action",
+     action: "Planning",
+     parameter: `${ctx.issue.identifier} — agent: ${ctx.agentId} (${agentModel})`,
+   });
+
+   const sessionId = `linear-plan-${ctx.agentSessionId}`;
+   ctx.api.logger.info(`${TAG(ctx)} planner: spawning agent session=${sessionId}`);

    const result = await runAgent({
      api: ctx.api,
      agentId: ctx.agentId,
-     sessionId: `linear-plan-${ctx.agentSessionId}`,
+     sessionId,
      message,
      timeoutMs: 5 * 60_000,
-
    });

    if (!result.success) {
-     await emit(ctx, { type: "error", body: `Planning failed: ${result.output.slice(0, 500)}` });
+     ctx.api.logger.error(`${TAG(ctx)} planner failed after ${elapsed(t0)}: ${result.output.slice(0, 300)}`);
+     await emit(ctx, {
+       type: "error",
+       body: `[1/3 Plan] Failed after ${elapsed(t0)}: ${result.output.slice(0, 400)}`,
+     });
      return null;
    }

    const plan = result.output;
+   ctx.api.logger.info(`${TAG(ctx)} planner completed in ${elapsed(t0)} (${plan.length} chars)`);

    // Post plan as a Linear comment
    await ctx.linearApi.createComment(
      ctx.issue.id,
-     `## Implementation Plan\n\n${plan}\n\n---\n*Reply to this comment to approve the plan and begin implementation.*`,
+     `## Implementation Plan\n\n${plan}\n\n---\n*Proceeding to implementation...*`,
    );

    await emit(ctx, {
-     type: "elicitation",
-     body: "I've posted an implementation plan as a comment. Please review and reply to approve.",
+     type: "action",
+     action: "Plan complete",
+     parameter: `${ctx.issue.identifier} — ${elapsed(t0)}, moving to implementation`,
    });

    return plan;
  }

- // ── Stage 2: Implementor ──────────────────────────────────────────
+ // ---------------------------------------------------------------------------
+ // Stage 2: Implementor
+ // ---------------------------------------------------------------------------
+ //
+ // Deterministic: pipeline CODE calls the coding CLI directly.
+ // The agent model only evaluates results between runs.
+
+ const BACKEND_RUNNERS: Record<
+   CodingBackend,
+   (api: OpenClawPluginApi, params: any, pluginConfig?: Record<string, unknown>) => Promise<CliResult>
+ > = {
+   codex: runCodex,
+   claude: runClaude,
+   gemini: runGemini,
+ };

  export async function runImplementorStage(
    ctx: PipelineContext,
    plan: string,
  ): Promise<string | null> {
-   await emit(ctx, { type: "thought", body: "Plan approved. Starting implementation..." });
+   const t0 = Date.now();
+   const agentModel = resolveAgentModel(ctx.api, ctx.agentId);
+   const pluginConfig = (ctx.api as any).pluginConfig as Record<string, unknown> | undefined;
+
+   // Resolve coding backend from config (coding-tools.json)
+   const codingConfig = loadCodingConfig();
+   const backend = resolveCodingBackend(codingConfig);
+   const runner = BACKEND_RUNNERS[backend];
+   const backendName = backend.charAt(0).toUpperCase() + backend.slice(1);
+
+   ctx.api.logger.info(
+     `${TAG(ctx)} stage 2/3: implementor starting ` +
+       `(coding_cli=${backendName}, tier=${ctx.tier ?? "unknown"}, ` +
+       `worktree=${ctx.worktreePath ?? "default"}, ` +
+       `eval_agent=${ctx.agentId}, eval_model=${agentModel})`,
+   );

-   const message = `You are an implementor agent. Execute this plan for issue ${ctx.issue.identifier}.
+   await emit(ctx, {
+     type: "thought",
+     body: `[2/3 Implement] Starting ${backendName} CLI → ${ctx.worktreePath ?? "default workspace"}`,
+   });

- ## Issue: ${ctx.issue.identifier} ${ctx.issue.title}
+   // Build the implementation prompt for the coding CLI
+   const codePrompt = [
+     `Implement the following plan for issue ${ctx.issue.identifier} — ${ctx.issue.title}.`,
+     ``,
+     `## Plan`,
+     plan,
+     ``,
+     `## Instructions`,
+     `- Follow the plan step by step`,
+     `- Create commits for each logical change`,
+     `- Run tests if the project has them`,
+     `- Stay within scope of the plan`,
+   ].join("\n");

- ## Approved Plan:
- ${plan}
+   await emit(ctx, {
+     type: "action",
+     action: `Running ${backendName}`,
+     parameter: `${ctx.tier ?? "unknown"} tier — worktree: ${ctx.worktreePath ?? "default"}`,
+   });

- ## Instructions
- 1. Follow the plan step by step
- 2. Use \`code_run\` to write code, create files, run tests, and refactor — it works in an isolated git worktree
- 3. Use \`spawn_agent\` / \`ask_agent\` to delegate to other crew agents if needed
- 4. Create commits for each logical change
- 5. If the plan involves creating a PR, do so
- 6. Report what you did, any files changed, and the worktree/branch path
- ${toolContext(ctx)}
+   // Call the coding CLI directly — deterministic, not LLM choice.
+   // NOTE: Do NOT pass ctx.model here. The tier model (e.g. anthropic/claude-sonnet-4-6)
+   // is for tracking/display only. Each coding CLI uses its own configured model.
+   ctx.api.logger.info(`${TAG(ctx)} implementor: invoking ${backendName} CLI (no model override; CLI uses its own config)`);
+   const cliStart = Date.now();

-   Be thorough but stay within scope of the plan.`;
+   const codeResult = await runner(ctx.api, {
+     prompt: codePrompt,
+     workingDir: ctx.worktreePath ?? undefined,
+     timeoutMs: 10 * 60_000,
+   }, pluginConfig);
+
+   const cliElapsed = elapsed(cliStart);
+
+   if (!codeResult.success) {
+     ctx.api.logger.warn(
+       `${TAG(ctx)} implementor: ${backendName} CLI failed after ${cliElapsed} — ` +
+         `error: ${codeResult.error ?? "unknown"}, output: ${codeResult.output.slice(0, 300)}`,
+     );
+     await emit(ctx, {
+       type: "error",
+       body: `[2/3 Implement] ${backendName} failed after ${cliElapsed}: ${(codeResult.error ?? codeResult.output).slice(0, 400)}`,
+     });
+
+     // Ask the agent to evaluate the failure
+     ctx.api.logger.info(`${TAG(ctx)} implementor: spawning ${ctx.agentId} (${agentModel}) to evaluate failure`);
+     await emit(ctx, {
+       type: "action",
+       action: "Evaluating failure",
+       parameter: `${ctx.agentId} (${agentModel}) analyzing ${backendName} error`,
+     });
+
+     const evalResult = await runAgent({
+       api: ctx.api,
+       agentId: ctx.agentId,
+       sessionId: `linear-impl-eval-${ctx.agentSessionId}`,
+       message: `${backendName} failed to implement the plan for ${ctx.issue.identifier}.\n\n## Plan\n${plan}\n\n## ${backendName} Output\n${codeResult.output.slice(0, 3000)}\n\n## Error\n${codeResult.error ?? "unknown"}\n\nAnalyze the failure. Summarize what went wrong and suggest next steps. Be concise.`,
+       timeoutMs: 2 * 60_000,
+     });
+
+     const failureSummary = evalResult.success
+       ? evalResult.output
+       : `Implementation failed and evaluation also failed: ${codeResult.output.slice(0, 500)}`;
+
+     await ctx.linearApi.createComment(
+       ctx.issue.id,
+       `## Implementation Failed\n\n**Backend:** ${backendName} (ran for ${cliElapsed})\n**Tier:** ${ctx.tier ?? "unknown"}\n\n${failureSummary}`,
+     );

-   await emit(ctx, { type: "action", action: "Calling coding provider", parameter: "Codex" });
+     return null;
+   }

-   const result = await runAgent({
+   ctx.api.logger.info(`${TAG(ctx)} implementor: ${backendName} CLI completed in ${cliElapsed} (${codeResult.output.length} chars output)`);
+
+   // Ask the agent to evaluate the result
+   const evalMessage = [
+     `${backendName} completed implementation for ${ctx.issue.identifier}. Evaluate the result.`,
+     ``,
+     `## Original Plan`,
+     plan,
+     ``,
+     `## ${backendName} Output`,
+     codeResult.output.slice(0, 5000),
+     ``,
+     `## Worktree`,
+     `Path: ${ctx.worktreePath ?? "default"}`,
+     `Branch: ${ctx.codexBranch ?? "unknown"}`,
+     ``,
+     `Summarize what was implemented, any issues found, and whether the plan was fully executed. Be concise.`,
+   ].join("\n");
+
+   ctx.api.logger.info(`${TAG(ctx)} implementor: spawning ${ctx.agentId} (${agentModel}) to evaluate results`);
+   await emit(ctx, {
+     type: "action",
+     action: "Evaluating results",
+     parameter: `${ctx.agentId} (${agentModel}) reviewing ${backendName} output`,
+   });
+
+   const evalStart = Date.now();
+   const evalResult = await runAgent({
      api: ctx.api,
      agentId: ctx.agentId,
-     sessionId: `linear-impl-${ctx.agentSessionId}`,
-     message,
-     timeoutMs: 10 * 60_000,
-
+     sessionId: `linear-impl-eval-${ctx.agentSessionId}`,
+     message: evalMessage,
+     timeoutMs: 3 * 60_000,
    });

-   if (!result.success) {
-     await emit(ctx, { type: "error", body: `Implementation failed: ${result.output.slice(0, 500)}` });
-     return null;
-   }
+   const summary = evalResult.success
+     ? evalResult.output
+     : `Implementation completed but evaluation failed. ${backendName} output:\n${codeResult.output.slice(0, 2000)}`;
+
+   ctx.api.logger.info(
+     `${TAG(ctx)} implementor: evaluation ${evalResult.success ? "succeeded" : "failed"} in ${elapsed(evalStart)}, ` +
+       `total stage time: ${elapsed(t0)}`,
+   );

-   // Try to extract worktree info from agent output (Codex results include it)
-   const worktreeMatch = result.output.match(/worktreePath["\s:]+([/\w.-]+)/);
-   const branchMatch = result.output.match(/branch["\s:]+([/\w.-]+)/);
-   if (worktreeMatch) ctx.worktreePath = worktreeMatch[1];
-   if (branchMatch) ctx.codexBranch = branchMatch[1];
+   await emit(ctx, {
+     type: "action",
+     action: "Implementation complete",
+     parameter: `${backendName} ${cliElapsed} + eval ${elapsed(evalStart)} = ${elapsed(t0)} total`,
+   });

-   await emit(ctx, { type: "action", action: "Implementation complete", parameter: ctx.issue.identifier });
-   return result.output;
+   return summary;
  }

- // ── Stage 3: Auditor ──────────────────────────────────────────────
+ // ---------------------------------------------------------------------------
+ // Stage 3: Auditor
+ // ---------------------------------------------------------------------------

  export async function runAuditorStage(
    ctx: PipelineContext,
    plan: string,
    implResult: string,
  ): Promise<void> {
-   await emit(ctx, { type: "thought", body: "Auditing implementation against the plan..." });
+   const t0 = Date.now();
+   const agentModel = resolveAgentModel(ctx.api, ctx.agentId);
+
+   ctx.api.logger.info(
+     `${TAG(ctx)} stage 3/3: auditor starting (agent=${ctx.agentId}, model=${agentModel})`,
+   );
+   await emit(ctx, {
+     type: "thought",
+     body: `[3/3 Audit] Reviewing implementation with ${ctx.agentId} (${agentModel})...`,
+   });

    const worktreeInfo = ctx.worktreePath
      ? `\n## Worktree\nCode changes are at: \`${ctx.worktreePath}\` (branch: \`${ctx.codexBranch ?? "unknown"}\`)\n`
@@ -190,18 +380,30 @@ ${toolContext(ctx)}

  Output ONLY the audit summary.`;

-   await emit(ctx, { type: "action", action: "Auditing", parameter: ctx.issue.identifier });
+   const sessionId = `linear-audit-${ctx.agentSessionId}`;
+   ctx.api.logger.info(`${TAG(ctx)} auditor: spawning agent session=${sessionId}`);
+
+   await emit(ctx, {
+     type: "action",
+     action: "Auditing",
+     parameter: `${ctx.issue.identifier} — agent: ${ctx.agentId} (${agentModel})`,
+   });

    const result = await runAgent({
      api: ctx.api,
      agentId: ctx.agentId,
-     sessionId: `linear-audit-${ctx.agentSessionId}`,
+     sessionId,
      message,
      timeoutMs: 5 * 60_000,
-
    });

-   const auditSummary = result.success ? result.output : `Audit failed: ${result.output.slice(0, 500)}`;
+   const auditSummary = result.success
+     ? result.output
+     : `Audit failed: ${result.output.slice(0, 500)}`;
+
+   ctx.api.logger.info(
+     `${TAG(ctx)} auditor: ${result.success ? "completed" : "failed"} in ${elapsed(t0)} (${auditSummary.length} chars)`,
+   );

    await ctx.linearApi.createComment(
      ctx.issue.id,
@@ -210,13 +412,35 @@ Output ONLY the audit summary.`;

    await emit(ctx, {
      type: "response",
-     body: `Completed work on ${ctx.issue.identifier}. Plan, implementation, and audit posted as comments.`,
+     body: `[3/3 Audit] ${result.success ? "Complete" : "Failed"} (${elapsed(t0)}). ` +
+       `All stages done for ${ctx.issue.identifier}. Plan, implementation, and audit posted as comments.`,
    });
  }

- // ── Full Pipeline ─────────────────────────────────────────────────
+ // ---------------------------------------------------------------------------
+ // Full Pipeline
+ // ---------------------------------------------------------------------------
+ //
+ // Runs all three stages sequentially: plan → implement → audit.
+ // Assignment is the trigger AND the approval — no pause between stages.
+ // Each stage's result feeds into the next. If any stage fails, the
+ // pipeline stops and reports the error.

  export async function runFullPipeline(ctx: PipelineContext): Promise<void> {
+   const t0 = Date.now();
+   const agentModel = resolveAgentModel(ctx.api, ctx.agentId);
+   const codingConfig = loadCodingConfig();
+   const codingBackend = resolveCodingBackend(codingConfig);
+
+   ctx.api.logger.info(
+     `${TAG(ctx)} === PIPELINE START === ` +
+       `agent=${ctx.agentId}, agent_model=${agentModel}, ` +
+       `coding_cli=${codingBackend}, tier=${ctx.tier ?? "unknown"}, ` +
+       `worktree=${ctx.worktreePath ?? "none"}, ` +
+       `branch=${ctx.codexBranch ?? "none"}, ` +
+       `session=${ctx.agentSessionId}`,
+   );
+
    // Register active session so tools (code_run) can resolve it
    setActiveSession({
      agentSessionId: ctx.agentSessionId,
@@ -226,44 +450,49 @@ export async function runFullPipeline(ctx: PipelineContext): Promise<void> {
      startedAt: Date.now(),
    });

+   await emit(ctx, {
+     type: "thought",
+     body: `Pipeline started for ${ctx.issue.identifier} — ` +
+       `agent: ${ctx.agentId} (${agentModel}), ` +
+       `coding: ${codingBackend}, ` +
+       `tier: ${ctx.tier ?? "unknown"}`,
+   });
+
    try {
      // Stage 1: Plan
      const plan = await runPlannerStage(ctx);
      if (!plan) {
-       clearActiveSession(ctx.issue.id);
+       ctx.api.logger.error(`${TAG(ctx)} planner produced no plan — aborting after ${elapsed(t0)}`);
+       await emit(ctx, {
+         type: "error",
+         body: `Pipeline aborted — planning stage failed after ${elapsed(t0)}. No plan produced.`,
+       });
        return;
      }

-     // Pipeline pauses here — user must reply to approve.
-     // The "prompted" / "created" webhook will call resumePipeline().
-     // Active session stays registered until resume completes.
-   } catch (err) {
-     clearActiveSession(ctx.issue.id);
-     ctx.api.logger.error(`Pipeline error: ${err}`);
-     await emit(ctx, { type: "error", body: `Pipeline failed: ${String(err).slice(0, 500)}` });
-   }
- }
-
- export async function resumePipeline(ctx: PipelineContext, plan: string): Promise<void> {
-   // Register (or update) active session for tool resolution
-   setActiveSession({
-     agentSessionId: ctx.agentSessionId,
-     issueIdentifier: ctx.issue.identifier,
-     issueId: ctx.issue.id,
-     agentId: ctx.agentId,
-     startedAt: Date.now(),
-   });
-
-   try {
      // Stage 2: Implement
      const implResult = await runImplementorStage(ctx, plan);
-     if (!implResult) return;
+     if (!implResult) {
+       ctx.api.logger.error(`${TAG(ctx)} implementor failed — aborting after ${elapsed(t0)}`);
+       await emit(ctx, {
+         type: "error",
+         body: `Pipeline aborted — implementation stage failed after ${elapsed(t0)}.`,
+       });
+       return;
+     }

      // Stage 3: Audit
      await runAuditorStage(ctx, plan, implResult);
+
+     ctx.api.logger.info(
+       `${TAG(ctx)} === PIPELINE COMPLETE === total time: ${elapsed(t0)}`,
+     );
    } catch (err) {
-     ctx.api.logger.error(`Pipeline error: ${err}`);
-     await emit(ctx, { type: "error", body: `Pipeline failed: ${String(err).slice(0, 500)}` });
+     ctx.api.logger.error(`${TAG(ctx)} === PIPELINE ERROR === after ${elapsed(t0)}: ${err}`);
+     await emit(ctx, {
+       type: "error",
+       body: `Pipeline crashed after ${elapsed(t0)}: ${String(err).slice(0, 400)}`,
+     });
    } finally {
      clearActiveSession(ctx.issue.id);
    }
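
Reader's note: the new implementor stage depends on a uniform runner signature, but ./cli-shared.js is not included in this diff. The sketch below spells out the assumed contract; the CliResult field names (success, output, error) are inferred from how runImplementorStage reads them and are not confirmed by this diff.

import type { OpenClawPluginApi } from "openclaw/plugin-sdk";

// Assumed result contract, inferred from usage in runImplementorStage:
// codeResult.success, codeResult.output (string), codeResult.error (optional).
interface AssumedCliResult {
  success: boolean;
  output: string;
  error?: string;
}

// Every backend runner is callable the same way, which is what lets
// BACKEND_RUNNERS reduce backend choice to a config-driven map lookup
// instead of an LLM tool decision.
type BackendRunner = (
  api: OpenClawPluginApi,
  params: { prompt: string; workingDir?: string; timeoutMs?: number },
  pluginConfig?: Record<string, unknown>,
) => Promise<AssumedCliResult>;

// Example: a stub runner conforming to the assumed contract, usable in tests.
const stubRunner: BackendRunner = async (_api, params) => ({
  success: true,
  output: `would run: ${params.prompt.slice(0, 60)}`,
});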
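
For orientation, a minimal sketch of how the reworked pipeline might be invoked. The PipelineContext fields here are inferred from the hunks above; the full interface is only partially visible in this diff, so the issue shape and any fields beyond those shown are hypothetical.

import type { OpenClawPluginApi } from "openclaw/plugin-sdk";
import type { LinearAgentApi } from "./linear-api.js";
import { runFullPipeline, type PipelineContext } from "./pipeline.js";

// Sketch under assumptions: issue/linearApi/agentSessionId are inferred from
// their usage above, not from a definition shown in this diff.
async function onIssueAssigned(
  api: OpenClawPluginApi,
  linearApi: LinearAgentApi,
  agentSessionId: string,
): Promise<void> {
  const ctx: PipelineContext = {
    api,
    linearApi,
    agentSessionId,
    agentId: "zoe", // evaluation agent; its model is resolved via resolveAgentModel()
    issue: { id: "abc-123", identifier: "UAT-123", title: "Fix login redirect" },
    worktreePath: "/tmp/worktrees/UAT-123", // isolated git worktree for this issue
    codexBranch: "codex/UAT-123",
    tier: "medior",                       // display/tracking only
    model: "anthropic/claude-sonnet-4-6", // NOT passed to the coding CLI
  };
  // Plan → implement → audit runs straight through; in 0.4.1 assignment
  // doubles as approval, with no pause between stages.
  await runFullPipeline(ctx);
}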
package/src/tier-assess.ts ADDED
@@ -0,0 +1,157 @@
+ /**
+  * tier-assess.ts — LLM-based complexity assessment for Linear issues.
+  *
+  * Uses runAgent() with the agent's configured model (e.g. kimi-k2.5)
+  * to assess issue complexity. The agent model handles orchestration —
+  * it never calls coding CLIs directly.
+  *
+  * Cost: one short agent turn (~500 tokens). Latency: ~2-5s.
+  */
+ import { readFileSync } from "node:fs";
+ import { join } from "node:path";
+ import type { OpenClawPluginApi } from "openclaw/plugin-sdk";
+ import type { Tier } from "./dispatch-state.js";
+
+ // ---------------------------------------------------------------------------
+ // Tier → Model mapping
+ // ---------------------------------------------------------------------------
+
+ export const TIER_MODELS: Record<Tier, string> = {
+   junior: "anthropic/claude-haiku-4-5",
+   medior: "anthropic/claude-sonnet-4-6",
+   senior: "anthropic/claude-opus-4-6",
+ };
+
+ export interface TierAssessment {
+   tier: Tier;
+   model: string;
+   reasoning: string;
+ }
+
+ export interface IssueContext {
+   identifier: string;
+   title: string;
+   description?: string | null;
+   labels?: string[];
+   commentCount?: number;
+ }
+
+ // ---------------------------------------------------------------------------
+ // Assessment
+ // ---------------------------------------------------------------------------
+
+ const ASSESS_PROMPT = `You are a complexity assessor. Assess this issue and respond ONLY with JSON.
+
+ Tiers:
+ - junior: typos, copy changes, config tweaks, simple CSS, env var additions
+ - medior: features, bugfixes, moderate refactoring, adding tests, API changes
+ - senior: architecture changes, database migrations, security fixes, multi-service coordination
+
+ Consider:
+ 1. How many files/services are likely affected?
+ 2. Does it touch auth, data, or external APIs? (higher risk → higher tier)
+ 3. Is the description clear and actionable?
+ 4. Are there dependencies or unknowns?
+
+ Respond ONLY with: {"tier":"junior|medior|senior","reasoning":"one sentence"}`;
+
+ /**
+  * Assess issue complexity using the agent's configured model.
+  *
+  * Falls back to "medior" if the agent call fails or returns invalid JSON.
+  */
+ export async function assessTier(
+   api: OpenClawPluginApi,
+   issue: IssueContext,
+   agentId?: string,
+ ): Promise<TierAssessment> {
+   const issueText = [
+     `Issue: ${issue.identifier} — ${issue.title}`,
+     issue.description ? `Description: ${issue.description.slice(0, 1500)}` : "",
+     issue.labels?.length ? `Labels: ${issue.labels.join(", ")}` : "",
+     issue.commentCount != null ? `Comments: ${issue.commentCount}` : "",
+   ].filter(Boolean).join("\n");
+
+   const message = `${ASSESS_PROMPT}\n\n${issueText}`;
+
+   try {
+     const { runAgent } = await import("./agent.js");
+     const result = await runAgent({
+       api,
+       agentId: agentId ?? resolveDefaultAgent(api),
+       sessionId: `tier-assess-${issue.identifier}-${Date.now()}`,
+       message,
+       timeoutMs: 30_000, // 30s — this should be fast
+     });
+
+     // Try to parse assessment from output regardless of success flag.
+     // runAgent may report success:false (non-zero exit code) even when
+     // the agent produced valid JSON output — e.g. agent exited with
+     // signal but wrote the response before terminating.
+     if (result.output) {
+       const parsed = parseAssessment(result.output);
+       if (parsed) {
+         api.logger.info(`Tier assessment for ${issue.identifier}: ${parsed.tier} — ${parsed.reasoning} (agent success=${result.success})`);
+         return parsed;
+       }
+     }
+
+     if (!result.success) {
+       api.logger.warn(`Tier assessment agent failed for ${issue.identifier}: ${result.output.slice(0, 200)}`);
+     } else {
+       api.logger.warn(`Tier assessment for ${issue.identifier}: could not parse response: ${result.output.slice(0, 200)}`);
+     }
+   } catch (err) {
+     api.logger.warn(`Tier assessment error for ${issue.identifier}: ${err}`);
+   }
+
+   // Fallback: medior is the safest default
+   const fallback: TierAssessment = {
+     tier: "medior",
+     model: TIER_MODELS.medior,
+     reasoning: "Assessment failed — defaulting to medior",
+   };
+   api.logger.info(`Tier assessment fallback for ${issue.identifier}: medior`);
+   return fallback;
+ }
+
+ // ---------------------------------------------------------------------------
+ // Helpers
+ // ---------------------------------------------------------------------------
+
+ function resolveDefaultAgent(api: OpenClawPluginApi): string {
+   // Use the plugin's configured default agent (same one that runs the pipeline)
+   const fromConfig = (api as any).pluginConfig?.defaultAgentId;
+   if (typeof fromConfig === "string" && fromConfig) return fromConfig;
+
+   // Fall back to isDefault in agent profiles
+   try {
+     const profilesPath = join(process.env.HOME ?? "/home/claw", ".openclaw", "agent-profiles.json");
+     const raw = readFileSync(profilesPath, "utf8");
+     const profiles = JSON.parse(raw).agents ?? {};
+     const defaultAgent = Object.entries(profiles).find(([, p]: [string, any]) => p.isDefault);
+     if (defaultAgent) return defaultAgent[0];
+   } catch { /* fall through */ }
+
+   return "zoe";
+ }
+
+ function parseAssessment(raw: string): TierAssessment | null {
+   // Extract JSON from the response (may have markdown wrapping)
+   const jsonMatch = raw.match(/\{[^}]+\}/);
+   if (!jsonMatch) return null;
+
+   try {
+     const parsed = JSON.parse(jsonMatch[0]);
+     const tier = parsed.tier as string;
+     if (tier !== "junior" && tier !== "medior" && tier !== "senior") return null;
+
+     return {
+       tier: tier as Tier,
+       model: TIER_MODELS[tier as Tier],
+       reasoning: parsed.reasoning ?? "no reasoning provided",
+     };
+   } catch {
+     return null;
+   }
+ }
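
To close, a short sketch of how assessTier is meant to be consumed, based only on the exports shown above. The dispatch code that performs this wiring in the plugin is not part of this diff, so the calling function and the sample issue values are illustrative.

import type { OpenClawPluginApi } from "openclaw/plugin-sdk";
import { assessTier, TIER_MODELS } from "./tier-assess.js";

// Hypothetical caller: run one cheap assessment turn, then record the result.
async function chooseTier(api: OpenClawPluginApi): Promise<void> {
  const assessment = await assessTier(api, {
    identifier: "UAT-123",
    title: "Add rate limiting to the webhook endpoint",
    description: "Requests should be throttled per workspace",
    labels: ["backend"],
    commentCount: 2,
  });
  // assessment.model mirrors TIER_MODELS[assessment.tier]; per the NOTE in
  // pipeline.ts it is recorded for display/tracking and never passed to the
  // coding CLI, which keeps its own configured model.
  const expected = TIER_MODELS[assessment.tier];
  api.logger.info(
    `tier=${assessment.tier} model=${assessment.model} (expected=${expected}) — ${assessment.reasoning}`,
  );
}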