xforce 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js ADDED
@@ -0,0 +1,2599 @@
1
+ // src/pipeline/orchestrator.ts
2
+ import { nanoid } from "nanoid";
3
+ import { rm } from "fs/promises";
4
+ import ora from "ora";
5
+
6
+ // src/core/logger.ts
7
+ import pino from "pino";
8
+ var isCI = !!(process.env.CI || process.env.GITHUB_ACTIONS);
9
+ var logger = pino({
10
+ level: process.env.LOG_LEVEL ?? (isCI ? "info" : "debug"),
11
+ ...isCI ? {} : {
12
+ transport: {
13
+ target: "pino-pretty",
14
+ options: {
15
+ colorize: true,
16
+ translateTime: "HH:MM:ss",
17
+ ignore: "pid,hostname"
18
+ }
19
+ }
20
+ }
21
+ });
22
+ function createChildLogger(name) {
23
+ return logger.child({ component: name });
24
+ }
25
+
26
+ // src/core/config.ts
27
+ import { readFileSync, existsSync } from "fs";
28
+ import { resolve } from "path";
29
+ import { parse as parseYaml } from "yaml";
30
+ import { z } from "zod";
31
+
32
+ // src/core/constants.ts
33
+ var DEFAULT_CONFIG = {
34
+ model: "claude-sonnet-4-5-20250929",
35
+ reviewerModel: "claude-sonnet-4-5-20250929",
36
+ plannerModel: "claude-sonnet-4-5-20250929",
37
+ maxTurns: 25,
38
+ maxReviewCycles: 3,
39
+ maxTestRetries: 2,
40
+ timeoutMinutes: 30,
41
+ budgetPerTaskUsd: 5,
42
+ branchPrefix: "xforce",
43
+ labels: {
44
+ ready: "xforce:ready",
45
+ inProgress: "xforce:in-progress",
46
+ done: "xforce:done",
47
+ failed: "xforce:failed"
48
+ },
49
+ allowedTools: ["Read", "Write", "Edit", "Bash", "Glob", "Grep"],
50
+ enablePlanning: true,
51
+ enableSecurityScan: true
52
+ };
53
+ var CONFIG_FILE_NAMES = [
54
+ "xforce.config.yaml",
55
+ "xforce.config.yml",
56
+ ".xforce/config.yaml",
57
+ ".xforce/config.yml"
58
+ ];
59
+
60
+ // src/core/errors.ts
61
+ var XForceError = class extends Error {
62
+ constructor(message, code) {
63
+ super(message);
64
+ this.code = code;
65
+ this.name = "XForceError";
66
+ }
67
+ };
68
+ var ConfigError = class extends XForceError {
69
+ constructor(message) {
70
+ super(message, "CONFIG_ERROR");
71
+ this.name = "ConfigError";
72
+ }
73
+ };
74
+ var IssueParseError = class extends XForceError {
75
+ constructor(message) {
76
+ super(message, "ISSUE_PARSE_ERROR");
77
+ this.name = "IssueParseError";
78
+ }
79
+ };
80
+ var CodingAgentError = class extends XForceError {
81
+ constructor(message, errors, costUsd) {
82
+ super(message, "CODING_AGENT_ERROR");
83
+ this.errors = errors;
84
+ this.costUsd = costUsd;
85
+ this.name = "CodingAgentError";
86
+ }
87
+ };
88
+ var ReviewerError = class extends XForceError {
89
+ constructor(message) {
90
+ super(message, "REVIEWER_ERROR");
91
+ this.name = "ReviewerError";
92
+ }
93
+ };
94
+ var PipelineError = class extends XForceError {
95
+ constructor(message) {
96
+ super(message, "PIPELINE_ERROR");
97
+ this.name = "PipelineError";
98
+ }
99
+ };
100
+ var TimeoutError = class extends XForceError {
101
+ constructor(message) {
102
+ super(message, "TIMEOUT_ERROR");
103
+ this.name = "TimeoutError";
104
+ }
105
+ };
106
+ function withTimeout(promise, timeoutMs, label = "Operation") {
107
+ return new Promise((resolve2, reject) => {
108
+ const timer = setTimeout(() => {
109
+ reject(new TimeoutError(`${label} timed out after ${Math.round(timeoutMs / 1e3)}s`));
110
+ }, timeoutMs);
111
+ promise.then((result) => {
112
+ clearTimeout(timer);
113
+ resolve2(result);
114
+ }).catch((error) => {
115
+ clearTimeout(timer);
116
+ reject(error);
117
+ });
118
+ });
119
+ }
120
+ var RetryableError = class extends Error {
121
+ constructor(message, retryAfterMs) {
122
+ super(message);
123
+ this.retryAfterMs = retryAfterMs;
124
+ this.name = "RetryableError";
125
+ }
126
+ };
127
+ async function withRetry(fn, options) {
128
+ let lastError;
129
+ for (let attempt = 0; attempt <= options.maxRetries; attempt++) {
130
+ try {
131
+ return await fn();
132
+ } catch (error) {
133
+ lastError = error;
134
+ if (attempt === options.maxRetries) break;
135
+ const delay = error instanceof RetryableError && error.retryAfterMs ? error.retryAfterMs : Math.min(options.baseDelayMs * 2 ** attempt, options.maxDelayMs);
136
+ options.onRetry?.(lastError, attempt + 1);
137
+ await new Promise((resolve2) => setTimeout(resolve2, delay));
138
+ }
139
+ }
140
+ throw lastError;
141
+ }
142
+
143
+ // src/core/config.ts
144
+ var LabelsSchema = z.object({
145
+ ready: z.string().default(DEFAULT_CONFIG.labels.ready),
146
+ inProgress: z.string().default(DEFAULT_CONFIG.labels.inProgress),
147
+ done: z.string().default(DEFAULT_CONFIG.labels.done),
148
+ failed: z.string().default(DEFAULT_CONFIG.labels.failed)
149
+ });
150
+ var AutoMergeRulesSchema = z.object({
151
+ types: z.array(z.enum(["feature", "bugfix", "refactor", "test", "docs"])).default(["bugfix", "test", "docs"]),
152
+ maxSize: z.enum(["xs", "s", "m", "l", "xl"]).default("m"),
153
+ mergeStrategy: z.enum(["squash", "merge", "rebase"]).default("squash"),
154
+ requireCleanSecurityScan: z.boolean().default(true)
155
+ });
156
+ var RepoConfigSchema = z.object({
157
+ owner: z.string().min(1),
158
+ name: z.string().min(1),
159
+ defaultBranch: z.string().default("main"),
160
+ testCommand: z.string().min(1),
161
+ lintCommand: z.string().optional(),
162
+ buildCommand: z.string().optional(),
163
+ runCommand: z.string().optional(),
164
+ claudeMdPath: z.string().optional(),
165
+ maxTurns: z.number().int().positive().optional(),
166
+ maxReviewCycles: z.number().int().min(1).max(10).optional(),
167
+ maxTestRetries: z.number().int().min(0).max(5).optional(),
168
+ budgetPerTaskUsd: z.number().positive().optional(),
169
+ autoMerge: z.boolean().default(false),
170
+ autoMergeRules: AutoMergeRulesSchema.optional(),
171
+ allowedTools: z.array(z.string()).optional(),
172
+ localPath: z.string().optional()
173
+ });
174
+ var DefaultsSchema = z.object({
175
+ model: z.string().default(DEFAULT_CONFIG.model),
176
+ reviewerModel: z.string().default(DEFAULT_CONFIG.reviewerModel),
177
+ plannerModel: z.string().default(DEFAULT_CONFIG.plannerModel),
178
+ maxTurns: z.number().int().positive().default(DEFAULT_CONFIG.maxTurns),
179
+ maxReviewCycles: z.number().int().min(1).max(10).default(DEFAULT_CONFIG.maxReviewCycles),
180
+ maxTestRetries: z.number().int().min(0).max(5).default(DEFAULT_CONFIG.maxTestRetries),
181
+ timeoutMinutes: z.number().int().positive().default(DEFAULT_CONFIG.timeoutMinutes),
182
+ budgetPerTaskUsd: z.number().positive().default(DEFAULT_CONFIG.budgetPerTaskUsd),
183
+ branchPrefix: z.string().default(DEFAULT_CONFIG.branchPrefix),
184
+ labels: LabelsSchema.default({}),
185
+ allowedTools: z.array(z.string()).default([...DEFAULT_CONFIG.allowedTools]),
186
+ enablePlanning: z.boolean().default(DEFAULT_CONFIG.enablePlanning),
187
+ enableSecurityScan: z.boolean().default(DEFAULT_CONFIG.enableSecurityScan)
188
+ });
189
+ var XForceConfigSchema = z.object({
190
+ version: z.literal("1"),
191
+ defaults: DefaultsSchema.default({}),
192
+ repos: z.array(RepoConfigSchema).min(1),
193
+ notifications: z.object({
194
+ slack: z.object({
195
+ webhookUrl: z.string(),
196
+ channels: z.object({
197
+ success: z.string().optional(),
198
+ failure: z.string().optional()
199
+ }).optional()
200
+ }).optional(),
201
+ github: z.object({
202
+ mentionOnFailure: z.array(z.string()).optional(),
203
+ mentionOnReview: z.array(z.string()).optional()
204
+ }).optional()
205
+ }).optional()
206
+ });
207
+ function interpolateEnvVars(value) {
208
+ return value.replace(/\$\{([^}]+)\}/g, (_match, varName) => {
209
+ const envValue = process.env[varName];
210
+ if (envValue === void 0) {
211
+ throw new ConfigError(`Environment variable ${varName} is not set`);
212
+ }
213
+ return envValue;
214
+ });
215
+ }
216
+ function interpolateDeep(obj) {
217
+ if (typeof obj === "string") return interpolateEnvVars(obj);
218
+ if (Array.isArray(obj)) return obj.map(interpolateDeep);
219
+ if (obj !== null && typeof obj === "object") {
220
+ const result = {};
221
+ for (const [key, value] of Object.entries(obj)) {
222
+ result[key] = interpolateDeep(value);
223
+ }
224
+ return result;
225
+ }
226
+ return obj;
227
+ }
228
+ function findConfigFile(basePath) {
229
+ for (const name of CONFIG_FILE_NAMES) {
230
+ const fullPath = resolve(basePath, name);
231
+ if (existsSync(fullPath)) return fullPath;
232
+ }
233
+ return null;
234
+ }
235
+ function loadConfig(configPath) {
236
+ let filePath;
237
+ if (configPath) {
238
+ filePath = resolve(configPath);
239
+ if (!existsSync(filePath)) {
240
+ throw new ConfigError(`Config file not found: ${filePath}`);
241
+ }
242
+ } else {
243
+ const found = findConfigFile(process.cwd());
244
+ if (!found) {
245
+ throw new ConfigError(
246
+ `No config file found. Create xforce.config.yaml or specify --config path.`
247
+ );
248
+ }
249
+ filePath = found;
250
+ }
251
+ const raw = readFileSync(filePath, "utf-8");
252
+ const parsed = parseYaml(raw);
253
+ const interpolated = interpolateDeep(parsed);
254
+ const result = XForceConfigSchema.safeParse(interpolated);
255
+ if (!result.success) {
256
+ const errors = result.error.issues.map((i) => ` - ${i.path.join(".")}: ${i.message}`).join("\n");
257
+ throw new ConfigError(`Invalid config:
258
+ ${errors}`);
259
+ }
260
+ return result.data;
261
+ }
262
+ function resolveRepoConfig(config, owner, name) {
263
+ const repo = config.repos.find((r) => r.owner === owner && r.name === name);
264
+ if (!repo) {
265
+ throw new ConfigError(
266
+ `Repository ${owner}/${name} not found in config. Add it under the 'repos' section.`
267
+ );
268
+ }
269
+ return {
270
+ owner: repo.owner,
271
+ name: repo.name,
272
+ defaultBranch: repo.defaultBranch,
273
+ testCommand: repo.testCommand,
274
+ lintCommand: repo.lintCommand,
275
+ buildCommand: repo.buildCommand,
276
+ runCommand: repo.runCommand,
277
+ claudeMdPath: repo.claudeMdPath,
278
+ model: config.defaults.model,
279
+ reviewerModel: config.defaults.reviewerModel,
280
+ plannerModel: config.defaults.plannerModel,
281
+ maxTurns: repo.maxTurns ?? config.defaults.maxTurns,
282
+ maxReviewCycles: repo.maxReviewCycles ?? config.defaults.maxReviewCycles,
283
+ maxTestRetries: repo.maxTestRetries ?? config.defaults.maxTestRetries,
284
+ timeoutMinutes: config.defaults.timeoutMinutes,
285
+ budgetPerTaskUsd: repo.budgetPerTaskUsd ?? config.defaults.budgetPerTaskUsd,
286
+ branchPrefix: config.defaults.branchPrefix,
287
+ labels: config.defaults.labels,
288
+ autoMerge: repo.autoMerge,
289
+ autoMergeRules: repo.autoMergeRules ?? {
290
+ types: ["feature", "bugfix", "refactor", "test", "docs"],
291
+ maxSize: "xl",
292
+ mergeStrategy: "squash",
293
+ requireCleanSecurityScan: false
294
+ },
295
+ allowedTools: repo.allowedTools ?? config.defaults.allowedTools,
296
+ enablePlanning: config.defaults.enablePlanning,
297
+ enableSecurityScan: config.defaults.enableSecurityScan,
298
+ localPath: repo.localPath
299
+ };
300
+ }
301
+
302
+ // src/pipeline/state-machine.ts
303
+ var VALID_TRANSITIONS = {
304
+ parsing_issue: ["creating_branch", "failed"],
305
+ creating_branch: ["planning", "coding", "failed"],
306
+ planning: ["coding", "failed"],
307
+ coding: ["running_tests", "failed"],
308
+ running_tests: ["reviewing", "coding", "failed"],
309
+ reviewing: ["awaiting_human", "addressing_review", "merging", "failed"],
310
+ addressing_review: ["coding", "failed"],
311
+ merging: ["completed", "awaiting_human", "failed"],
312
+ awaiting_human: ["completed", "failed"],
313
+ completed: [],
314
+ failed: []
315
+ };
316
+ function validateTransition(from, to) {
317
+ const valid = VALID_TRANSITIONS[from];
318
+ if (!valid.includes(to)) {
319
+ throw new PipelineError(`Invalid state transition: ${from} -> ${to}`);
320
+ }
321
+ }
322
+
323
+ // src/pipeline/feedback-loop.ts
324
+ function formatCommandFeedback(kind, output) {
325
+ const maxLen = 8e3;
326
+ const truncated = output.length > maxLen ? output.slice(0, maxLen) + "\n\n... (truncated)" : output;
327
+ const labels = {
328
+ lint: "Lint",
329
+ build: "Build",
330
+ test: "Tests",
331
+ run: "Run verification"
332
+ };
333
+ const label = labels[kind] ?? kind.charAt(0).toUpperCase() + kind.slice(1);
334
+ return `${label} failed. Output:
335
+
336
+ ${truncated}`;
337
+ }
338
+
339
+ // src/github/client.ts
340
+ import { Octokit } from "@octokit/rest";
341
+ var log = createChildLogger("github");
342
+ var RETRY_OPTIONS = {
343
+ maxRetries: 3,
344
+ baseDelayMs: 1e3,
345
+ maxDelayMs: 3e4,
346
+ onRetry: (error, attempt) => {
347
+ log.warn({ error: error.message, attempt }, "GitHub API call failed, retrying");
348
+ }
349
+ };
350
+ async function withGitHubRetry(fn) {
351
+ return withRetry(async () => {
352
+ try {
353
+ return await fn();
354
+ } catch (error) {
355
+ const status = error?.status ?? error?.response?.status;
356
+ if (status === 429 || status === 403) {
357
+ const retryAfter = error?.response?.headers?.["retry-after"];
358
+ const retryMs = retryAfter ? parseInt(retryAfter) * 1e3 : void 0;
359
+ throw new RetryableError(`GitHub API rate limited (${status})`, retryMs);
360
+ }
361
+ if (status >= 500) {
362
+ throw new RetryableError(`GitHub API server error (${status})`);
363
+ }
364
+ throw error;
365
+ }
366
+ }, RETRY_OPTIONS);
367
+ }
368
+ var _octokit = null;
369
+ function getOctokit() {
370
+ if (!_octokit) {
371
+ const token = process.env.GITHUB_TOKEN;
372
+ if (!token) {
373
+ throw new Error("GITHUB_TOKEN environment variable is required");
374
+ }
375
+ _octokit = new Octokit({ auth: token });
376
+ }
377
+ return _octokit;
378
+ }
379
+ async function getIssue(owner, repo, issueNumber) {
380
+ return withGitHubRetry(async () => {
381
+ const octokit = getOctokit();
382
+ log.debug({ owner, repo, issueNumber }, "Fetching issue");
383
+ const { data } = await octokit.issues.get({ owner, repo, issue_number: issueNumber });
384
+ return data;
385
+ });
386
+ }
387
+ async function getIssueLabels(owner, repo, issueNumber) {
388
+ return withGitHubRetry(async () => {
389
+ const octokit = getOctokit();
390
+ const { data } = await octokit.issues.listLabelsOnIssue({
391
+ owner,
392
+ repo,
393
+ issue_number: issueNumber
394
+ });
395
+ return data.map((l) => l.name);
396
+ });
397
+ }
398
+ async function addLabel(owner, repo, issueNumber, label) {
399
+ return withGitHubRetry(async () => {
400
+ const octokit = getOctokit();
401
+ log.debug({ owner, repo, issueNumber, label }, "Adding label");
402
+ await octokit.issues.addLabels({ owner, repo, issue_number: issueNumber, labels: [label] });
403
+ });
404
+ }
405
+ async function removeLabel(owner, repo, issueNumber, label) {
406
+ const octokit = getOctokit();
407
+ log.debug({ owner, repo, issueNumber, label }, "Removing label");
408
+ try {
409
+ await octokit.issues.removeLabel({ owner, repo, issue_number: issueNumber, name: label });
410
+ } catch {
411
+ }
412
+ }
413
+ async function addComment(owner, repo, issueNumber, body) {
414
+ return withGitHubRetry(async () => {
415
+ const octokit = getOctokit();
416
+ log.debug({ owner, repo, issueNumber }, "Adding comment");
417
+ const { data } = await octokit.issues.createComment({
418
+ owner,
419
+ repo,
420
+ issue_number: issueNumber,
421
+ body
422
+ });
423
+ return data;
424
+ });
425
+ }
426
+ async function getPRDiff(owner, repo, prNumber) {
427
+ return withGitHubRetry(async () => {
428
+ const octokit = getOctokit();
429
+ log.debug({ owner, repo, prNumber }, "Fetching PR diff");
430
+ const { data } = await octokit.pulls.get({
431
+ owner,
432
+ repo,
433
+ pull_number: prNumber,
434
+ mediaType: { format: "diff" }
435
+ });
436
+ return data;
437
+ });
438
+ }
439
+ function parseIssueUrl(url) {
440
+ const match = url.match(/github\.com\/([^/]+)\/([^/]+)\/issues\/(\d+)/);
441
+ if (!match) {
442
+ throw new Error(`Invalid GitHub issue URL: ${url}`);
443
+ }
444
+ return {
445
+ owner: match[1],
446
+ repo: match[2],
447
+ issueNumber: parseInt(match[3], 10)
448
+ };
449
+ }
450
+ function parsePRUrl(url) {
451
+ const match = url.match(/github\.com\/([^/]+)\/([^/]+)\/pull\/(\d+)/);
452
+ if (!match) {
453
+ throw new Error(`Invalid GitHub PR URL: ${url}`);
454
+ }
455
+ return {
456
+ owner: match[1],
457
+ repo: match[2],
458
+ prNumber: parseInt(match[3], 10)
459
+ };
460
+ }
461
+
462
+ // src/github/issue-parser.ts
463
+ function extractSection(body, heading) {
464
+ const headingRegex = new RegExp(`^#{2,3}\\s+${escapeRegex(heading)}\\s*$`, "mi");
465
+ const headingMatch = headingRegex.exec(body);
466
+ if (!headingMatch) return null;
467
+ const startIdx = headingMatch.index + headingMatch[0].length;
468
+ const rest = body.slice(startIdx);
469
+ const nextHeadingMatch = rest.match(/\n#{2,3}\s+/);
470
+ const content = nextHeadingMatch ? rest.slice(0, nextHeadingMatch.index) : rest;
471
+ const trimmed = content.trim();
472
+ return trimmed.length > 0 ? trimmed : null;
473
+ }
474
+ function escapeRegex(str) {
475
+ return str.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
476
+ }
477
+ function parseBulletList(text) {
478
+ return text.split("\n").map((line) => line.replace(/^[-*]\s*(\[[ x]\]\s*)?/, "").trim()).filter((line) => line.length > 0);
479
+ }
480
+ function extractPriority(labels) {
481
+ for (const label of labels) {
482
+ const lower = label.toLowerCase();
483
+ if (lower.includes("critical")) return "critical";
484
+ if (lower.includes("priority/high") || lower === "high") return "high";
485
+ if (lower.includes("priority/medium") || lower === "medium") return "medium";
486
+ if (lower.includes("priority/low") || lower === "low") return "low";
487
+ }
488
+ return "medium";
489
+ }
490
+ function extractType(labels) {
491
+ for (const label of labels) {
492
+ const lower = label.toLowerCase();
493
+ if (lower.includes("feature") || lower.includes("enhancement")) return "feature";
494
+ if (lower.includes("bug")) return "bugfix";
495
+ if (lower.includes("refactor")) return "refactor";
496
+ if (lower.includes("test")) return "test";
497
+ if (lower.includes("doc")) return "docs";
498
+ }
499
+ return "feature";
500
+ }
501
+ function extractSize(labels, body) {
502
+ for (const label of labels) {
503
+ const lower = label.toLowerCase();
504
+ if (lower.includes("size/xs") || lower.includes("extra small")) return "xs";
505
+ if (lower.includes("size/s") || lower === "small") return "s";
506
+ if (lower.includes("size/m") || lower === "medium") return "m";
507
+ if (lower.includes("size/l") && !lower.includes("xl")) return "l";
508
+ if (lower.includes("size/xl") || lower.includes("extra large")) return "xl";
509
+ }
510
+ const sizeSection = extractSection(body, "Estimated Size");
511
+ if (sizeSection) {
512
+ const lower = sizeSection.toLowerCase();
513
+ if (lower.includes("xs") || lower.includes("extra small")) return "xs";
514
+ if (lower.includes("xl") || lower.includes("extra large")) return "xl";
515
+ if (lower.includes("< 50") || lower.match(/\bs\b/)) return "s";
516
+ if (lower.includes("150-500") || lower.match(/\bm\b/)) return "m";
517
+ if (lower.includes("500-1000") || lower.match(/\bl\b/)) return "l";
518
+ }
519
+ return "m";
520
+ }
521
+ function parseIssueBody(params) {
522
+ const { title, body, labels, issueNumber, issueUrl, repoOwner, repoName } = params;
523
+ if (!body || body.trim().length === 0) {
524
+ throw new IssueParseError("Issue body is empty");
525
+ }
526
+ const context = extractSection(body, "Context");
527
+ if (!context) {
528
+ throw new IssueParseError('Missing required section: "## Context"');
529
+ }
530
+ const criteriaSection = extractSection(body, "Acceptance Criteria");
531
+ if (!criteriaSection) {
532
+ throw new IssueParseError('Missing required section: "## Acceptance Criteria"');
533
+ }
534
+ const acceptanceCriteria = parseBulletList(criteriaSection);
535
+ if (acceptanceCriteria.length === 0) {
536
+ throw new IssueParseError("Acceptance Criteria section has no items");
537
+ }
538
+ const filesSection = extractSection(body, "Affected Files");
539
+ const affectedFiles = filesSection ? parseBulletList(filesSection) : [];
540
+ const edgeCasesSection = extractSection(body, "Edge Cases");
541
+ const edgeCases = edgeCasesSection ? parseBulletList(edgeCasesSection) : [];
542
+ return {
543
+ title,
544
+ context,
545
+ acceptanceCriteria,
546
+ affectedFiles,
547
+ edgeCases,
548
+ priority: extractPriority(labels),
549
+ type: extractType(labels),
550
+ size: extractSize(labels, body),
551
+ issueNumber,
552
+ issueUrl,
553
+ repoOwner,
554
+ repoName
555
+ };
556
+ }
557
+
558
+ // src/github/branch.ts
559
+ import { tmpdir } from "os";
560
+ import { join } from "path";
561
+ import { mkdtemp, access, writeFile } from "fs/promises";
562
+ import { simpleGit } from "simple-git";
563
+ var DEFAULT_GITIGNORE = `node_modules/
564
+ .next/
565
+ dist/
566
+ build/
567
+ .env
568
+ .env.local
569
+ .env*.local
570
+ *.tsbuildinfo
571
+ .DS_Store
572
+ `;
573
+ var log2 = createChildLogger("git");
574
+ function slugify(text) {
575
+ return text.toLowerCase().replace(/[^a-z0-9]+/g, "-").replace(/^-|-$/g, "").slice(0, 40);
576
+ }
577
+ async function setupBranch(params) {
578
+ const { owner, repo, defaultBranch, issueNumber, issueTitle, branchPrefix, localDir } = params;
579
+ const slug = slugify(issueTitle);
580
+ const branchName = `${branchPrefix}/${issueNumber}-${slug}`;
581
+ if (localDir) {
582
+ log2.info({ workDir: localDir, branchName }, "Using local repository");
583
+ const repoGit2 = simpleGit(localDir);
584
+ await repoGit2.fetch("origin");
585
+ const status = await repoGit2.status();
586
+ const hadChanges = status.files.length > 0;
587
+ if (hadChanges) {
588
+ log2.info("Stashing local changes");
589
+ await repoGit2.stash();
590
+ }
591
+ try {
592
+ await repoGit2.checkout(["-B", branchName, `origin/${defaultBranch}`]);
593
+ } catch {
594
+ await repoGit2.checkoutLocalBranch(branchName);
595
+ }
596
+ await repoGit2.addConfig("user.name", "X-Force Bot");
597
+ await repoGit2.addConfig("user.email", "xforce-bot@users.noreply.github.com");
598
+ log2.info({ branchName }, "Branch created");
599
+ return { workDir: localDir, branchName, git: repoGit2, isLocal: true };
600
+ }
601
+ const cloneUrl = `https://x-access-token:${process.env.GITHUB_TOKEN}@github.com/${owner}/${repo}.git`;
602
+ const workDir = await mkdtemp(join(tmpdir(), `xforce-${repo}-`));
603
+ log2.info({ workDir, branchName }, "Cloning repository");
604
+ const git = simpleGit();
605
+ await git.clone(cloneUrl, workDir, ["--depth", "1", "--branch", defaultBranch]);
606
+ const repoGit = simpleGit(workDir);
607
+ await repoGit.addConfig("user.name", "X-Force Bot");
608
+ await repoGit.addConfig("user.email", "xforce-bot@users.noreply.github.com");
609
+ await repoGit.checkoutLocalBranch(branchName);
610
+ const gitignorePath = join(workDir, ".gitignore");
611
+ try {
612
+ await access(gitignorePath);
613
+ } catch {
614
+ log2.info("Creating default .gitignore");
615
+ await writeFile(gitignorePath, DEFAULT_GITIGNORE, "utf-8");
616
+ }
617
+ log2.info({ branchName }, "Branch created");
618
+ return { workDir, branchName, git: repoGit, isLocal: false };
619
+ }
620
+ async function restoreDefaultBranch(git, defaultBranch) {
621
+ try {
622
+ await git.checkout(defaultBranch);
623
+ log2.info({ branch: defaultBranch }, "Restored default branch");
624
+ } catch (error) {
625
+ log2.warn({ error: String(error) }, "Failed to restore default branch");
626
+ }
627
+ }
628
+ async function commitAndPush(params) {
629
+ const { git, branchName, message } = params;
630
+ await git.add("-A");
631
+ const status = await git.status();
632
+ if (status.staged.length === 0 && status.files.length === 0) {
633
+ log2.warn("No changes to commit");
634
+ return "";
635
+ }
636
+ const commitResult = await git.commit(message);
637
+ log2.info({ sha: commitResult.commit }, "Changes committed");
638
+ await git.push("origin", branchName, ["--set-upstream", "--force"]);
639
+ log2.info({ branchName }, "Pushed to remote");
640
+ return commitResult.commit;
641
+ }
642
+
643
+ // src/github/pr.ts
644
+ var log3 = createChildLogger("pr");
645
+ async function createPullRequest(params) {
646
+ const { owner, repo, branchName, defaultBranch, taskSpec, pipeline } = params;
647
+ const octokit = getOctokit();
648
+ const body = buildPRBody(taskSpec, pipeline);
649
+ log3.info({ owner, repo, branchName }, "Creating pull request");
650
+ const { data } = await octokit.pulls.create({
651
+ owner,
652
+ repo,
653
+ title: `[X-Force] ${taskSpec.title}`,
654
+ body,
655
+ head: branchName,
656
+ base: defaultBranch
657
+ });
658
+ log3.info({ prNumber: data.number, prUrl: data.html_url }, "PR created");
659
+ return { prNumber: data.number, prUrl: data.html_url };
660
+ }
661
+ async function updatePullRequest(params) {
662
+ const { owner, repo, prNumber, taskSpec, pipeline } = params;
663
+ const octokit = getOctokit();
664
+ const body = buildPRBody(taskSpec, pipeline);
665
+ await octokit.pulls.update({ owner, repo, pull_number: prNumber, body });
666
+ }
667
+ async function labelPR(owner, repo, prNumber, label) {
668
+ const octokit = getOctokit();
669
+ await octokit.issues.addLabels({ owner, repo, issue_number: prNumber, labels: [label] });
670
+ }
671
+ async function commentOnPR(owner, repo, prNumber, body) {
672
+ const octokit = getOctokit();
673
+ await octokit.issues.createComment({ owner, repo, issue_number: prNumber, body });
674
+ }
675
+ async function mergePR(params) {
676
+ const { owner, repo, prNumber, strategy, commitTitle } = params;
677
+ const octokit = getOctokit();
678
+ log3.info({ owner, repo, prNumber, strategy }, "Attempting to merge PR");
679
+ try {
680
+ const { data } = await octokit.pulls.merge({
681
+ owner,
682
+ repo,
683
+ pull_number: prNumber,
684
+ merge_method: strategy,
685
+ commit_title: commitTitle
686
+ });
687
+ log3.info({ sha: data.sha, merged: data.merged }, "PR merge result");
688
+ return { merged: data.merged, sha: data.sha };
689
+ } catch (error) {
690
+ const err = error;
691
+ const message = err.message ?? String(error);
692
+ const status = err.status;
693
+ if (status === 405 || status === 409) {
694
+ log3.warn({ status, message }, "PR cannot be auto-merged");
695
+ return { merged: false, error: message };
696
+ }
697
+ throw error;
698
+ }
699
+ }
700
+ function buildPRBody(taskSpec, pipeline) {
701
+ return `## Automated by X-Force
702
+
703
+ Resolves #${taskSpec.issueNumber}
704
+
705
+ ### Task
706
+ **${taskSpec.title}**
707
+
708
+ ${taskSpec.context}
709
+
710
+ ### Acceptance Criteria
711
+ ${taskSpec.acceptanceCriteria.map((c) => `- [ ] ${c}`).join("\n")}
712
+
713
+ ### Pipeline Info
714
+ - **Pipeline ID**: \`${pipeline.id}\`
715
+ - **Review Cycles**: ${pipeline.reviewCycle}
716
+ - **Total Cost**: $${pipeline.totalCostUsd.toFixed(4)}
717
+ - **Status**: ${pipeline.status}
718
+
719
+ ---
720
+ *Generated by [X-Force](https://github.com/xforce) AI Pipeline*`;
721
+ }
722
+
723
+ // src/agents/coder.ts
724
+ import { query } from "@anthropic-ai/claude-agent-sdk";
725
+
726
+ // src/agents/prompts/coder-system.ts
727
+ var CODER_SYSTEM_PROMPT_APPEND = `
728
+ You are working as part of an automated development pipeline called X-Force.
729
+ You receive specifications from GitHub Issues and implement them autonomously.
730
+
731
+ CRITICAL: START WRITING CODE IMMEDIATELY. Do not spend more than a few turns reading existing files. Your primary job is to CREATE and EDIT files. The pipeline will handle committing, testing, and reviewing separately \u2014 you must NOT run tests or create git commits yourself.
732
+
733
+ RULES:
734
+ 1. WRITE CODE FIRST. Briefly check the repo structure, then start creating/editing files right away.
735
+ 2. Implement EXACTLY what the spec asks for. Do not add unrequested features.
736
+ 3. Follow the existing code style and patterns in the repository.
737
+ 4. Make minimal, focused changes. Do not refactor unrelated code.
738
+ 5. Ensure a .gitignore exists with at minimum: node_modules/, .next/, dist/, build/, .env, .env.local
739
+ 6. NEVER commit dependency directories (node_modules, vendor, .venv, __pycache__, etc.).
740
+
741
+ DO NOT:
742
+ - Run tests (the pipeline runs tests after you finish)
743
+ - Run build commands (the pipeline handles build verification)
744
+ - Run lint commands (the pipeline handles lint checking)
745
+ - Create git commits (the pipeline commits your changes)
746
+ - Run git commands (the pipeline manages git)
747
+ - Spend excessive turns exploring \u2014 get to writing code quickly
748
+
749
+ When you receive review feedback (marked with "## Review Feedback"), address EVERY issue marked as critical or major. For minor issues, use your judgment.
750
+
751
+ When you receive test failure output (marked with "## Test Failures"), analyze the failures and fix your code to make all tests pass.
752
+ `;
753
+ function buildCoderPrompt(taskSpec) {
754
+ return `## Task: ${taskSpec.title}
755
+
756
+ ### Context
757
+ ${taskSpec.context}
758
+
759
+ ### Acceptance Criteria
760
+ ${taskSpec.acceptanceCriteria.map((c, i) => `${i + 1}. ${c}`).join("\n")}
761
+
762
+ ### Affected Files
763
+ ${taskSpec.affectedFiles.length > 0 ? taskSpec.affectedFiles.map((f) => `- ${f}`).join("\n") : "Not specified - determine the best files to modify."}
764
+
765
+ ### Edge Cases to Handle
766
+ ${taskSpec.edgeCases.length > 0 ? taskSpec.edgeCases.map((e) => `- ${e}`).join("\n") : "None specified."}
767
+
768
+ ### Task Type: ${taskSpec.type} | Size: ${taskSpec.size} | Priority: ${taskSpec.priority}
769
+
770
+ Please implement this task now. Start writing code immediately \u2014 do not over-analyze. The pipeline will handle testing, committing, and reviewing your changes.`;
771
+ }
772
+ function buildReviewFeedbackSection(review) {
773
+ const issues = review.issues.map(
774
+ (issue) => `- **[${issue.severity.toUpperCase()}]** ${issue.file}${issue.line ? `:${issue.line}` : ""}
775
+ ${issue.description}${issue.suggestedFix ? `
776
+ Suggested fix: ${issue.suggestedFix}` : ""}`
777
+ ).join("\n");
778
+ return `## Review Feedback (Address These Issues)
779
+
780
+ ### Summary
781
+ ${review.summary}
782
+
783
+ ### Issues to Fix
784
+ ${issues || "No specific issues listed."}
785
+
786
+ ### Unmet Acceptance Criteria
787
+ ${review.specAdherence.unmet.length > 0 ? review.specAdherence.unmet.map((c) => `- ${c}`).join("\n") : "All criteria met."}
788
+
789
+ ### Security Concerns
790
+ ${review.securityConcerns.length > 0 ? review.securityConcerns.map((c) => `- ${c}`).join("\n") : "None."}
791
+
792
+ Address all CRITICAL and MAJOR issues. Then re-run tests.`;
793
+ }
794
+ function buildTestFailureFeedbackSection(testOutput) {
795
+ return buildCommandFailureFeedbackSection("test", testOutput);
796
+ }
797
+ function buildCommandFailureFeedbackSection(kind, output) {
798
+ const label = kind.charAt(0).toUpperCase() + kind.slice(1);
799
+ return `## ${label} Failures
800
+
801
+ The following ${kind} failures were detected after your changes. Please analyze and fix them:
802
+
803
+ \`\`\`
804
+ ${output}
805
+ \`\`\`
806
+
807
+ Fix the code to make the ${kind} pass. Do not commit \u2014 the pipeline handles that.`;
808
+ }
809
+
810
+ // src/agents/prompts/planner-system.ts
811
// System prompt for the read-only planning agent: explore the repo with
// Read/Glob/Grep, then emit a JSON implementation plan (see PLAN_JSON_SCHEMA).
var PLANNER_SYSTEM_PROMPT = `You are a senior software architect analyzing a codebase to create an implementation plan.

You have access to read-only tools: Read, Glob, and Grep. Use them to:
1. Explore the repository structure (Glob for file patterns, Read for file contents)
2. Understand existing patterns, conventions, and architecture
3. Identify the exact files that need to be modified or created
4. Analyze dependencies and potential ripple effects

RULES:
- Start by quickly checking the project structure (Glob for key file patterns). If the repo is empty or has very few files, skip deep exploration and produce your plan immediately.
- Be specific about file paths \u2014 use actual paths you found in the codebase, or specify paths to create for greenfield projects.
- Consider test files and their patterns.
- Identify potential risks (breaking changes, missing test coverage, security).
- Estimate complexity honestly.
- Your plan should be actionable and concrete, not vague.
- Do NOT spend more than a few turns exploring. Produce your JSON plan as quickly as possible.

You MUST respond with valid JSON matching the required schema.`;
829
// Build the user prompt handed to the planning agent for a task spec.
function buildPlannerPrompt(taskSpec) {
  const criteria = taskSpec.acceptanceCriteria.map((c, i) => `${i + 1}. ${c}`).join("\n");
  const affected = taskSpec.affectedFiles.length > 0 ? taskSpec.affectedFiles.map((f) => `- ${f}`).join("\n") : "Not specified \u2014 determine from codebase analysis.";
  const edges = taskSpec.edgeCases.length > 0 ? taskSpec.edgeCases.map((e) => `- ${e}`).join("\n") : "None specified.";
  return `## Task: ${taskSpec.title}

### Context
${taskSpec.context}

### Acceptance Criteria
${criteria}

### Affected Files (from issue \u2014 verify these)
${affected}

### Edge Cases to Handle
${edges}

### Task Type: ${taskSpec.type} | Size: ${taskSpec.size} | Priority: ${taskSpec.priority}

Analyze the codebase and produce a structured implementation plan. Use your tools to explore the project structure and understand the existing patterns before planning.`;
}
848
// Render an implementation plan as a markdown section for the coder prompt.
// Steps are listed in ascending `order`.
//
// Fix: Array.prototype.sort mutates in place, so sorting
// plan.implementationSteps directly reordered the caller's plan object as a
// side effect. Sort a shallow copy instead.
function buildPlanSection(plan) {
  const steps = [...plan.implementationSteps].sort((a, b) => a.order - b.order).map(
    (s) => `${s.order}. ${s.description}
   Files: ${s.files.join(", ")}
   Rationale: ${s.rationale}`
  ).join("\n\n");
  return `## Implementation Plan (Follow This)

### Approach
${plan.approach}

### Files to Modify
${plan.filesToModify.map((f) => `- ${f}`).join("\n")}

### Files to Create
${plan.filesToCreate.map((f) => `- ${f}`).join("\n") || "None"}

### Steps
${steps}

### Risks to Watch For
${plan.risks.map((r) => `- ${r}`).join("\n") || "None identified"}

Follow this plan closely. If you discover the plan is incorrect or incomplete, adapt as needed but document your deviations.`;
}
873
+
874
+ // src/agents/coder.ts
875
// Child logger scoped to the coding-agent component.
var log4 = createChildLogger("coder");
876
// Clamp a string to at most `max` characters, appending "..." when cut.
function truncate(s, max) {
  if (s.length <= max) {
    return s;
  }
  return s.slice(0, max) + "...";
}
879
// Produce a short human-readable description of an agent tool invocation,
// used for log lines and the onProgress callback.
function describeToolUse(name, input) {
  const path = input?.file_path;
  // Lazy describers so truncate() only runs for the matched tool.
  const describers = new Map([
    ["Read", () => `Reading ${path ?? "file"}`],
    ["Write", () => `Writing ${path ?? "file"}`],
    ["Edit", () => `Editing ${path ?? "file"}`],
    ["Bash", () => `Running: ${truncate(input?.command ?? "command", 80)}`],
    ["Glob", () => `Searching files: ${input?.pattern ?? ""}`],
    ["Grep", () => `Searching for: ${truncate(input?.pattern ?? "", 60)}`]
  ]);
  const describe = describers.get(name);
  return describe ? describe() : `Using ${name}`;
}
898
// Run the coding agent for one task attempt.
//
// Builds the coder prompt from the task spec, appending optional sections:
// implementation plan, previous review feedback, failing-test output, and
// failing-command output. Streams agent messages, surfacing each tool_use
// as a log line and via onProgress, and captures the final result message.
//
// Returns { sessionId, costUsd, result, numTurns }.
// Throws CodingAgentError when no result message arrives or the run ends in
// a non-success, non-max-turns subtype. Hitting max turns is treated as a
// partial success so the pipeline can continue with whatever was produced.
async function runCodingAgent(params) {
  const { taskSpec, repoConfig, workingDir, previousReview, testFailures, commandFailures, sessionId, plan, onProgress } = params;
  // Base prompt from the task spec; feedback sections appended below.
  let prompt = buildCoderPrompt(taskSpec);
  if (plan) {
    prompt += "\n\n" + buildPlanSection(plan);
  }
  if (previousReview) {
    prompt += "\n\n" + buildReviewFeedbackSection(previousReview);
  }
  if (testFailures) {
    prompt += "\n\n" + buildTestFailureFeedbackSection(testFailures);
  }
  if (commandFailures) {
    prompt += "\n\n" + buildCommandFailureFeedbackSection(commandFailures.kind, commandFailures.output);
  }
  log4.info(
    {
      task: taskSpec.title,
      model: repoConfig.model,
      maxTurns: repoConfig.maxTurns,
      budget: repoConfig.budgetPerTaskUsd,
      hasReview: !!previousReview,
      hasTestFailures: !!testFailures
    },
    "Starting coding agent"
  );
  const result = query({
    prompt,
    options: {
      cwd: workingDir,
      model: repoConfig.model,
      maxTurns: repoConfig.maxTurns,
      maxBudgetUsd: repoConfig.budgetPerTaskUsd,
      allowedTools: repoConfig.allowedTools,
      permissionMode: "bypassPermissions",
      allowDangerouslySkipPermissions: true,
      systemPrompt: {
        type: "preset",
        preset: "claude_code",
        append: CODER_SYSTEM_PROMPT_APPEND
      },
      // Resume the prior agent session when one is supplied (review cycles).
      ...sessionId ? { resume: sessionId } : {}
    }
  });
  let resultMessage;
  // Stream: report tool_use blocks as progress; keep the last result message.
  for await (const message of result) {
    if (message.type === "assistant" && "content" in message) {
      const content = message.content;
      for (const block of content) {
        if (block.type === "tool_use" && block.name) {
          const detail = describeToolUse(block.name, block.input);
          log4.info({ tool: block.name }, detail);
          onProgress?.(detail);
        }
      }
    }
    if (message.type === "result") {
      resultMessage = message;
    }
  }
  if (!resultMessage) {
    throw new CodingAgentError("No result message received from coding agent", [], 0);
  }
  // Max-turns is not fatal: return the session so partial work is kept.
  if (resultMessage.subtype === "error_max_turns") {
    log4.warn(
      {
        cost: resultMessage.total_cost_usd,
        turns: resultMessage.num_turns
      },
      "Coding agent hit max turns \u2014 continuing with partial work"
    );
    return {
      sessionId: resultMessage.session_id,
      costUsd: resultMessage.total_cost_usd,
      result: "Agent hit max turns limit. Partial work may have been completed.",
      numTurns: resultMessage.num_turns
    };
  }
  if (resultMessage.subtype !== "success") {
    throw new CodingAgentError(
      `Coding agent failed: ${resultMessage.subtype}`,
      "errors" in resultMessage ? resultMessage.errors : [],
      resultMessage.total_cost_usd
    );
  }
  log4.info(
    {
      cost: resultMessage.total_cost_usd,
      turns: resultMessage.num_turns
    },
    "Coding agent completed"
  );
  return {
    sessionId: resultMessage.session_id,
    costUsd: resultMessage.total_cost_usd,
    result: resultMessage.result,
    numTurns: resultMessage.num_turns
  };
}
997
+
998
+ // src/agents/planner.ts
999
+ import { query as query2 } from "@anthropic-ai/claude-agent-sdk";
1000
+ import { z as z2 } from "zod";
1001
// Child logger scoped to the planner component.
var log5 = createChildLogger("planner");
// Zod schema for one ordered step of an implementation plan.
var ImplementationStepSchema = z2.object({
  order: z2.number().int().positive(),
  description: z2.string(),
  files: z2.array(z2.string()),
  rationale: z2.string()
});
// Zod schema for the full plan the planning agent must return; this is the
// authoritative runtime validation (see validatePlan).
var ImplementationPlanSchema = z2.object({
  approach: z2.string(),
  filesToModify: z2.array(z2.string()),
  filesToCreate: z2.array(z2.string()),
  estimatedComplexity: z2.enum(["low", "medium", "high"]),
  risks: z2.array(z2.string()),
  implementationSteps: z2.array(ImplementationStepSchema),
  estimatedTurns: z2.number().int().positive()
});
1017
// JSON Schema for the planner's structured output (agent SDK outputFormat).
// Kept in sync with ImplementationPlanSchema, which performs the
// authoritative validation after parsing.
var PLAN_JSON_SCHEMA = {
  type: "object",
  properties: {
    approach: { type: "string" },
    filesToModify: { type: "array", items: { type: "string" } },
    filesToCreate: { type: "array", items: { type: "string" } },
    estimatedComplexity: { type: "string", enum: ["low", "medium", "high"] },
    risks: { type: "array", items: { type: "string" } },
    implementationSteps: {
      type: "array",
      items: {
        type: "object",
        properties: {
          order: { type: "number" },
          description: { type: "string" },
          files: { type: "array", items: { type: "string" } },
          rationale: { type: "string" }
        },
        required: ["order", "description", "files", "rationale"]
      }
    },
    estimatedTurns: { type: "number" }
  },
  required: [
    "approach",
    "filesToModify",
    "filesToCreate",
    "estimatedComplexity",
    "risks",
    "implementationSteps",
    "estimatedTurns"
  ]
};
1050
// Extract a JSON object from free-form planner output.
//
// Fix: the greedy brace match ran BEFORE the fenced-block match, so output
// like "```json\n{...}\n``` plus prose containing { braces }" matched from
// the first "{" to the LAST "}" and produced unparseable text. Prefer an
// explicit ```json fence (its contents are exactly the payload), then fall
// back to the outermost {...} span.
function extractJSON(text) {
  const fenceMatch = text.match(/```(?:json)?\s*\n?([\s\S]*?)\n?```/);
  if (fenceMatch) return fenceMatch[1].trim();
  const jsonMatch = text.match(/\{[\s\S]*\}/);
  if (jsonMatch) return jsonMatch[0];
  throw new PipelineError("Could not extract JSON from planner response");
}
1057
// Validate a parsed planner payload against ImplementationPlanSchema,
// collapsing zod issues into a single PipelineError message on failure.
function validatePlan(parsed) {
  const outcome = ImplementationPlanSchema.safeParse(parsed);
  if (outcome.success) {
    return outcome.data;
  }
  const detail = outcome.error.issues
    .map((issue) => `${issue.path.join(".")}: ${issue.message}`)
    .join(", ");
  throw new PipelineError(`Planner response validation failed: ${detail}`);
}
1065
// Run the read-only planning agent against the working directory and return
// a validated implementation plan.
//
// The agent is restricted to Read/Glob/Grep and asked for structured JSON
// output. The plan is recovered in priority order from: structured_output,
// the result text, or the last assistant message (max-turns fallback).
//
// Returns { plan, costUsd, numTurns }.
// Throws PipelineError on agent failure, missing output, or invalid JSON.
async function runPlanningAgent(params) {
  const { taskSpec, repoConfig, workingDir, onProgress } = params;
  const prompt = buildPlannerPrompt(taskSpec);
  log5.info(
    { task: taskSpec.title, model: repoConfig.plannerModel },
    "Starting planning agent"
  );
  const result = query2({
    prompt,
    options: {
      cwd: workingDir,
      model: repoConfig.plannerModel,
      maxTurns: 50,
      // Planner is exploration-only: no write or shell tools.
      allowedTools: ["Read", "Glob", "Grep"],
      permissionMode: "bypassPermissions",
      allowDangerouslySkipPermissions: true,
      systemPrompt: PLANNER_SYSTEM_PROMPT,
      outputFormat: {
        type: "json_schema",
        schema: PLAN_JSON_SCHEMA
      }
    }
  });
  let resultMessage;
  // Track the last assistant text as a fallback JSON source.
  let lastAssistantText = "";
  for await (const message of result) {
    if (message.type === "assistant" && "content" in message) {
      const content = message.content;
      for (const block of content) {
        if (block.type === "tool_use" && block.name) {
          const detail = describeToolUse(block.name, block.input);
          log5.info({ tool: block.name }, detail);
          onProgress?.(detail);
        }
      }
      const textParts = content.filter((c) => c.type === "text" && c.text).map((c) => c.text);
      if (textParts.length > 0) {
        lastAssistantText = textParts.join("\n");
      }
    }
    if (message.type === "result") {
      resultMessage = message;
    }
  }
  if (!resultMessage) {
    throw new PipelineError("No result message received from planning agent");
  }
  log5.debug(
    {
      subtype: resultMessage.subtype,
      hasStructuredOutput: "structured_output" in resultMessage && !!resultMessage.structured_output,
      hasResult: "result" in resultMessage && !!resultMessage.result,
      hasLastAssistant: !!lastAssistantText,
      lastAssistantLength: lastAssistantText.length
    },
    "Planning agent result received"
  );
  const isSuccess = resultMessage.subtype === "success";
  const isMaxTurns = resultMessage.subtype === "error_max_turns";
  if (!isSuccess && !isMaxTurns) {
    const errorDetail = "errors" in resultMessage ? resultMessage.errors.join(", ") : "unknown";
    throw new PipelineError(
      `Planning agent failed (${resultMessage.subtype}): ${errorDetail}`
    );
  }
  if (isMaxTurns) {
    log5.warn("Planning agent hit max turns \u2014 attempting to extract plan");
  }
  // Recover the plan JSON: structured output > result text > assistant text.
  let parsed;
  if ("structured_output" in resultMessage && resultMessage.structured_output) {
    parsed = resultMessage.structured_output;
  } else if ("result" in resultMessage && resultMessage.result) {
    const jsonStr = extractJSON(resultMessage.result);
    try {
      parsed = JSON.parse(jsonStr);
    } catch {
      throw new PipelineError(
        `Planner returned invalid JSON: ${String(resultMessage.result).slice(0, 200)}`
      );
    }
  } else if (lastAssistantText) {
    log5.warn("No result output \u2014 extracting plan from last assistant message");
    const jsonStr = extractJSON(lastAssistantText);
    try {
      parsed = JSON.parse(jsonStr);
    } catch {
      throw new PipelineError(
        `Planner returned invalid JSON in assistant message: ${lastAssistantText.slice(0, 200)}`
      );
    }
  } else {
    throw new PipelineError("Planning agent produced no output");
  }
  const plan = validatePlan(parsed);
  log5.info(
    {
      complexity: plan.estimatedComplexity,
      steps: plan.implementationSteps.length,
      filesToModify: plan.filesToModify.length
    },
    "Plan created"
  );
  return {
    plan,
    costUsd: resultMessage.total_cost_usd,
    numTurns: resultMessage.num_turns
  };
}
1173
+
1174
+ // src/agents/reviewer.ts
1175
+ import { query as query3 } from "@anthropic-ai/claude-agent-sdk";
1176
+ import { z as z3 } from "zod";
1177
+
1178
+ // src/agents/prompts/reviewer-system.ts
1179
// System prompt for the reviewer agent: evaluate a diff against the task
// spec and answer with JSON only (shape mirrored by REVIEW_JSON_SCHEMA).
var REVIEWER_SYSTEM_PROMPT = `You are a senior code reviewer for an automated development pipeline.
Your job is to review a code diff against a specification and provide structured feedback.

You must evaluate:
1. **Spec Adherence**: Does the code implement all acceptance criteria?
2. **Code Quality**: Is the code clean, maintainable, follows existing patterns?
3. **Security**: Are there any security vulnerabilities, injection risks, exposed secrets?
4. **Edge Cases**: Are the listed edge cases handled?
5. **Tests**: Are tests included and do they cover the key scenarios?

You MUST respond with valid JSON matching this schema:
{
  "approved": boolean,
  "summary": "string - brief overall assessment",
  "issues": [
    {
      "severity": "critical|major|minor|suggestion",
      "file": "path/to/file",
      "line": number_or_null,
      "description": "what's wrong",
      "suggestedFix": "how to fix it (optional, can be null)"
    }
  ],
  "securityConcerns": ["string array, empty if none"],
  "specAdherence": {
    "met": ["criteria that are satisfied"],
    "unmet": ["criteria that are NOT satisfied"]
  }
}

Rules:
- Only mark "approved": true if ALL acceptance criteria are met AND there are zero critical/major issues.
- Be specific about file paths and line numbers.
- For review cycle > 0, focus on whether previous feedback was addressed.
- Do NOT nitpick style if the code follows the repository's existing conventions.
- Respond ONLY with the JSON object. No markdown fences, no explanation text.`;
1215
// Build the reviewer's user prompt: original spec, the diff under review,
// and which review cycle this is (cycle numbers are 1-based in the prompt).
function buildReviewerUserPrompt(taskSpec, diff, reviewCycle) {
  const criteria = taskSpec.acceptanceCriteria.map((c, i) => `${i + 1}. ${c}`).join("\n");
  const edges = taskSpec.edgeCases.length > 0 ? taskSpec.edgeCases.map((e) => `- ${e}`).join("\n") : "None specified.";
  const cycleNote = reviewCycle > 0 ? "This is a re-review after the coder addressed previous feedback. Focus on whether previous issues were fixed." : "This is the initial review.";
  return `## Original Specification

**Title**: ${taskSpec.title}

**Context**: ${taskSpec.context}

**Acceptance Criteria**:
${criteria}

**Edge Cases**:
${edges}

## Code Diff to Review

\`\`\`diff
${diff}
\`\`\`

## Review Cycle: ${reviewCycle + 1}
${cycleNote}

Provide your review as a JSON object matching the specified schema.`;
}
1239
+
1240
+ // src/agents/reviewer.ts
1241
// Child logger scoped to the reviewer component.
var log6 = createChildLogger("reviewer");
// Zod schema for a single review issue; line/suggestedFix may be null or
// absent (normalized to undefined in validateReviewResult).
var ReviewIssueSchema = z3.object({
  severity: z3.enum(["critical", "major", "minor", "suggestion"]),
  file: z3.string(),
  line: z3.number().nullable().optional(),
  description: z3.string(),
  suggestedFix: z3.string().nullable().optional()
});
// Zod schema for the reviewer's full structured verdict.
var ReviewResultSchema = z3.object({
  approved: z3.boolean(),
  summary: z3.string(),
  issues: z3.array(ReviewIssueSchema),
  securityConcerns: z3.array(z3.string()),
  specAdherence: z3.object({
    met: z3.array(z3.string()),
    unmet: z3.array(z3.string())
  })
});
1259
// JSON Schema for the reviewer's structured output (agent SDK outputFormat).
// Mirrors ReviewResultSchema, which performs the authoritative validation.
var REVIEW_JSON_SCHEMA = {
  type: "object",
  properties: {
    approved: { type: "boolean" },
    summary: { type: "string" },
    issues: {
      type: "array",
      items: {
        type: "object",
        properties: {
          severity: { type: "string", enum: ["critical", "major", "minor", "suggestion"] },
          file: { type: "string" },
          line: { type: ["number", "null"] },
          description: { type: "string" },
          suggestedFix: { type: ["string", "null"] }
        },
        required: ["severity", "file", "description"]
      }
    },
    securityConcerns: { type: "array", items: { type: "string" } },
    specAdherence: {
      type: "object",
      properties: {
        met: { type: "array", items: { type: "string" } },
        unmet: { type: "array", items: { type: "string" } }
      },
      required: ["met", "unmet"]
    }
  },
  required: ["approved", "summary", "issues", "securityConcerns", "specAdherence"]
};
1290
// Extract a JSON object from free-form reviewer output.
//
// Fix: same ordering defect as the planner's extractJSON — the greedy brace
// match ran before the fence match, so fenced JSON surrounded by prose
// containing braces produced unparseable text. Check the fence first, then
// fall back to the outermost {...} span.
function extractJSON2(text) {
  const fenceMatch = text.match(/```(?:json)?\s*\n?([\s\S]*?)\n?```/);
  if (fenceMatch) return fenceMatch[1].trim();
  const jsonMatch = text.match(/\{[\s\S]*\}/);
  if (jsonMatch) return jsonMatch[0];
  throw new ReviewerError("Could not extract JSON from reviewer response");
}
1297
// Validate parsed reviewer JSON against ReviewResultSchema and normalize
// nullable issue fields (null -> undefined) for downstream consumers.
function validateReviewResult(parsed) {
  const outcome = ReviewResultSchema.safeParse(parsed);
  if (!outcome.success) {
    const detail = outcome.error.issues
      .map((issue) => `${issue.path.join(".")}: ${issue.message}`)
      .join(", ");
    throw new ReviewerError(`Reviewer response validation failed: ${detail}`);
  }
  const normalizedIssues = outcome.data.issues.map((issue) => ({
    ...issue,
    line: issue.line ?? void 0,
    suggestedFix: issue.suggestedFix ?? void 0
  }));
  return { ...outcome.data, issues: normalizedIssues };
}
1312
// Run the tool-less reviewer agent over a diff and return its validated
// structured verdict.
//
// The review JSON is recovered in priority order from: structured_output,
// the result text, or the last assistant message (max-turns fallback).
// Throws ReviewerError on agent failure, missing output, or invalid JSON.
async function runReviewerAgent(params) {
  const { taskSpec, diff, repoConfig, reviewCycle } = params;
  log6.info(
    { model: repoConfig.reviewerModel, cycle: reviewCycle },
    "Starting reviewer agent"
  );
  const userPrompt = buildReviewerUserPrompt(taskSpec, diff, reviewCycle);
  const agentResult = query3({
    prompt: userPrompt,
    options: {
      model: repoConfig.reviewerModel,
      maxTurns: 50,
      // Reviewer reads only the diff in the prompt; no tools.
      tools: [],
      systemPrompt: REVIEWER_SYSTEM_PROMPT,
      permissionMode: "bypassPermissions",
      allowDangerouslySkipPermissions: true,
      outputFormat: {
        type: "json_schema",
        schema: REVIEW_JSON_SCHEMA
      }
    }
  });
  let resultMessage;
  // Track the last assistant text as a fallback JSON source.
  let lastAssistantText = "";
  for await (const message of agentResult) {
    if (message.type === "assistant" && "content" in message) {
      const textParts = message.content.filter((c) => c.type === "text" && c.text).map((c) => c.text);
      if (textParts.length > 0) {
        lastAssistantText = textParts.join("\n");
      }
    }
    if (message.type === "result") {
      resultMessage = message;
    }
  }
  if (!resultMessage) {
    throw new ReviewerError("No result message received from reviewer agent");
  }
  const isSuccess = resultMessage.subtype === "success";
  const isMaxTurns = resultMessage.subtype === "error_max_turns";
  if (!isSuccess && !isMaxTurns) {
    const errorDetail = "errors" in resultMessage ? resultMessage.errors.join(", ") : "unknown";
    throw new ReviewerError(`Reviewer agent failed (${resultMessage.subtype}): ${errorDetail}`);
  }
  if (isMaxTurns) {
    log6.warn("Reviewer hit max turns \u2014 attempting to extract result");
  }
  // Recover the review JSON: structured output > result text > assistant text.
  let parsed;
  if ("structured_output" in resultMessage && resultMessage.structured_output) {
    parsed = resultMessage.structured_output;
  } else if ("result" in resultMessage && resultMessage.result) {
    const jsonStr = extractJSON2(resultMessage.result);
    try {
      parsed = JSON.parse(jsonStr);
    } catch {
      throw new ReviewerError(`Reviewer returned invalid JSON: ${String(resultMessage.result).slice(0, 200)}`);
    }
  } else if (lastAssistantText) {
    log6.warn("No result output \u2014 extracting review from last assistant message");
    const jsonStr = extractJSON2(lastAssistantText);
    try {
      parsed = JSON.parse(jsonStr);
    } catch {
      throw new ReviewerError(
        `Reviewer returned invalid JSON in assistant message: ${lastAssistantText.slice(0, 200)}`
      );
    }
  } else {
    throw new ReviewerError("Reviewer agent produced no output");
  }
  const review = validateReviewResult(parsed);
  log6.info(
    {
      approved: review.approved,
      issueCount: review.issues.length,
      cycle: reviewCycle
    },
    "Review completed"
  );
  return review;
}
1393
+
1394
+ // src/agents/security-scanner.ts
1395
+ import { query as query4 } from "@anthropic-ai/claude-agent-sdk";
1396
+ import { z as z4 } from "zod";
1397
// Child logger scoped to the security-scanner component.
var log7 = createChildLogger("security-scanner");
// Zod schema for one security finding; line may be null or absent
// (normalized to undefined in validateReport).
var SecurityFindingSchema = z4.object({
  severity: z4.enum(["critical", "high", "medium", "low", "info"]),
  category: z4.string(),
  file: z4.string(),
  line: z4.number().nullable().optional(),
  description: z4.string(),
  recommendation: z4.string()
});
// Zod schema for the scanner's full report.
var SecurityReportSchema = z4.object({
  riskLevel: z4.enum(["critical", "high", "medium", "low", "none"]),
  findings: z4.array(SecurityFindingSchema),
  recommendations: z4.array(z4.string()),
  summary: z4.string()
});
1412
// JSON Schema for the scanner's structured output (agent SDK outputFormat).
// Mirrors SecurityReportSchema, which performs the authoritative validation.
var SECURITY_REPORT_JSON_SCHEMA = {
  type: "object",
  properties: {
    riskLevel: { type: "string", enum: ["critical", "high", "medium", "low", "none"] },
    findings: {
      type: "array",
      items: {
        type: "object",
        properties: {
          severity: { type: "string", enum: ["critical", "high", "medium", "low", "info"] },
          category: { type: "string" },
          file: { type: "string" },
          line: { type: ["number", "null"] },
          description: { type: "string" },
          recommendation: { type: "string" }
        },
        required: ["severity", "category", "file", "description", "recommendation"]
      }
    },
    recommendations: { type: "array", items: { type: "string" } },
    summary: { type: "string" }
  },
  required: ["riskLevel", "findings", "recommendations", "summary"]
};
1436
// System prompt for the security scanner: analyze the diff only, answer with
// JSON only (shape mirrored by SECURITY_REPORT_JSON_SCHEMA).
var SECURITY_SCANNER_SYSTEM_PROMPT = `You are a security-focused code reviewer for an automated development pipeline.
Your job is to analyze code diffs for security vulnerabilities and risks.

Focus areas:
1. **Injection**: SQL injection, command injection, XSS, template injection
2. **Authentication/Authorization**: Broken auth, missing access controls, privilege escalation
3. **Secrets**: Hardcoded credentials, API keys, tokens, connection strings
4. **Data Exposure**: Sensitive data in logs, error messages, or responses
5. **Dependencies**: Known vulnerable patterns, unsafe imports
6. **Input Validation**: Missing or inadequate validation, buffer overflows
7. **Cryptography**: Weak algorithms, improper random generation, insecure hashing
8. **Configuration**: Insecure defaults, debug mode in production, CORS misconfiguration

Rules:
- Only report actual vulnerabilities visible in the diff, not hypothetical ones.
- Be specific about file paths and line numbers.
- Provide actionable recommendations.
- Set riskLevel based on the highest severity finding (or "none" if no findings).
- Respond ONLY with the JSON object. No markdown fences, no explanation text.`;
1455
// Extract a JSON object from free-form security-scanner output.
//
// Fix: unlike its siblings (extractJSON/extractJSON2) this helper had no
// fenced-block fallback at all, so a model response wrapped in ```json
// fences with surrounding prose containing braces could not be parsed.
// Bring it in line with the siblings: fence first, then outermost {...}.
function extractJSON3(text) {
  const fenceMatch = text.match(/```(?:json)?\s*\n?([\s\S]*?)\n?```/);
  if (fenceMatch) return fenceMatch[1].trim();
  const jsonMatch = text.match(/\{[\s\S]*\}/);
  if (jsonMatch) return jsonMatch[0];
  throw new PipelineError("Could not extract JSON from security scanner response");
}
1460
// Validate parsed scanner JSON against SecurityReportSchema and normalize
// nullable finding lines (null -> undefined) for downstream consumers.
function validateReport(parsed) {
  const outcome = SecurityReportSchema.safeParse(parsed);
  if (!outcome.success) {
    const detail = outcome.error.issues
      .map((issue) => `${issue.path.join(".")}: ${issue.message}`)
      .join(", ");
    throw new PipelineError(`Security report validation failed: ${detail}`);
  }
  const normalizedFindings = outcome.data.findings.map((f) => ({
    ...f,
    line: f.line ?? void 0
  }));
  return { ...outcome.data, findings: normalizedFindings };
}
1474
// Run the tool-less security scanner over a diff and return its validated
// report plus the run cost.
//
// The report JSON is recovered in priority order from: structured_output,
// the result text, or the last assistant message. Unlike the other agents,
// max-turns here falls through to extraction without a warning log.
// Throws PipelineError on agent failure, missing output, or invalid JSON.
async function runSecurityScanner(params) {
  const { taskSpec, diff, repoConfig } = params;
  log7.info({ task: taskSpec.title, model: repoConfig.reviewerModel }, "Starting security scan");
  const prompt = `## Security Review Request

**Task**: ${taskSpec.title}
**Type**: ${taskSpec.type}

## Code Diff to Analyze

\`\`\`diff
${diff}
\`\`\`

Analyze this diff for security vulnerabilities. Provide your report as a JSON object matching the required schema.`;
  const result = query4({
    prompt,
    options: {
      // Reuses the reviewer model for scanning; no dedicated scanner model.
      model: repoConfig.reviewerModel,
      maxTurns: 50,
      tools: [],
      systemPrompt: SECURITY_SCANNER_SYSTEM_PROMPT,
      permissionMode: "bypassPermissions",
      allowDangerouslySkipPermissions: true,
      outputFormat: {
        type: "json_schema",
        schema: SECURITY_REPORT_JSON_SCHEMA
      }
    }
  });
  let resultMessage;
  // Track the last assistant text as a fallback JSON source.
  let lastAssistantText = "";
  for await (const message of result) {
    if (message.type === "assistant" && "content" in message) {
      const textParts = message.content.filter((c) => c.type === "text" && c.text).map((c) => c.text);
      if (textParts.length > 0) {
        lastAssistantText = textParts.join("\n");
      }
    }
    if (message.type === "result") {
      resultMessage = message;
    }
  }
  if (!resultMessage) {
    throw new PipelineError("No result message received from security scanner");
  }
  const isSuccess = resultMessage.subtype === "success";
  const isMaxTurns = resultMessage.subtype === "error_max_turns";
  if (!isSuccess && !isMaxTurns) {
    const errorDetail = "errors" in resultMessage ? resultMessage.errors.join(", ") : "unknown";
    throw new PipelineError(
      `Security scanner failed (${resultMessage.subtype}): ${errorDetail}`
    );
  }
  // Recover the report JSON: structured output > result text > assistant text.
  let parsed;
  if ("structured_output" in resultMessage && resultMessage.structured_output) {
    parsed = resultMessage.structured_output;
  } else if ("result" in resultMessage && resultMessage.result) {
    const jsonStr = extractJSON3(resultMessage.result);
    try {
      parsed = JSON.parse(jsonStr);
    } catch {
      throw new PipelineError(
        `Security scanner returned invalid JSON: ${String(resultMessage.result).slice(0, 200)}`
      );
    }
  } else if (lastAssistantText) {
    log7.warn("No result output \u2014 extracting report from last assistant message");
    const jsonStr = extractJSON3(lastAssistantText);
    try {
      parsed = JSON.parse(jsonStr);
    } catch {
      throw new PipelineError(
        `Security scanner returned invalid JSON in assistant message: ${lastAssistantText.slice(0, 200)}`
      );
    }
  } else {
    throw new PipelineError("Security scanner produced no output");
  }
  const report = validateReport(parsed);
  log7.info(
    {
      riskLevel: report.riskLevel,
      findingCount: report.findings.length
    },
    "Security scan completed"
  );
  return {
    report,
    costUsd: resultMessage.total_cost_usd
  };
}
1566
+
1567
+ // src/testing/test-runner.ts
1568
+ import { execa } from "execa";
1569
// Child logger scoped to the test-runner component.
var log8 = createChildLogger("test-runner");
1570
// Execute a configured command (test/lint/build) in workingDir and report
// pass/fail with combined output. Never throws: spawn errors and timeouts
// are returned as a failed result so the pipeline can feed the output back
// to the coder.
//
// Fix: the command string was split with `split(" ")`, so doubled or
// leading/trailing whitespace produced empty-string argv entries (and an
// empty cmd). Split on whitespace runs instead. NOTE: quoted arguments are
// still not supported — configure commands without shell quoting.
//
// Returns { passed, output, durationMs }.
async function runCommand(params) {
  const { workingDir, command, kind, timeoutMs = 3e5 } = params;
  // Naive whitespace tokenization; robust to repeated/surrounding spaces.
  const [cmd, ...args] = command.trim().split(/\s+/);
  log8.info({ kind, command, workingDir }, `Running ${kind} command`);
  const start = Date.now();
  try {
    const result = await execa(cmd, args, {
      cwd: workingDir,
      timeout: timeoutMs,
      reject: false, // non-zero exit reported via exitCode, not thrown
      all: true // interleave stdout/stderr into result.all
    });
    const durationMs = Date.now() - start;
    const passed = result.exitCode === 0;
    log8.info({ kind, passed, durationMs, exitCode: result.exitCode }, `${kind} completed`);
    return {
      passed,
      output: result.all ?? result.stdout + "\n" + result.stderr,
      durationMs
    };
  } catch (error) {
    // Spawn failures (missing binary) and timeouts land here.
    const durationMs = Date.now() - start;
    const message = error instanceof Error ? error.message : String(error);
    log8.error({ kind, error: message, durationMs }, `${kind} execution failed`);
    return {
      passed: false,
      output: message,
      durationMs
    };
  }
}
1601
// Run the repository's configured test command; thin wrapper over
// runCommand with kind "test". Returns runCommand's result shape.
async function runTests(params) {
  return runCommand({
    workingDir: params.workingDir,
    command: params.testCommand,
    kind: "test",
    timeoutMs: params.timeoutMs
  });
}
1609
+
1610
+ // src/notifications/sender.ts
1611
// Child logger scoped to the notifications component.
var log9 = createChildLogger("notifications");
1612
// POST a notification payload to a Slack incoming-webhook URL.
// Best-effort delivery: non-2xx responses and network errors are logged as
// warnings and never thrown, so notification problems cannot fail a pipeline.
async function sendSlackWebhook(webhookUrl, message) {
  try {
    const response = await fetch(webhookUrl, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(message)
    });
    if (!response.ok) {
      log9.warn({ status: response.status }, "Slack webhook request failed");
    }
  } catch (error) {
    const msg = error instanceof Error ? error.message : String(error);
    log9.warn({ error: msg }, "Failed to send Slack notification");
  }
}
1627
// Build the Slack payload ({ text }) for a pipeline lifecycle event.
function formatSlackMessage(state, event) {
  const taskTitle = state.taskSpec?.title ?? "Unknown task";
  const pipelineId = state.id;
  const cost = `$${state.totalCostUsd.toFixed(4)}`;
  // Emoji prefix per event type.
  const icons = {
    started: ":rocket:",
    tests_passed: ":white_check_mark:",
    tests_failed: ":x:",
    review_approved: ":thumbsup:",
    review_rejected: ":eyes:",
    completed: ":tada:",
    failed: ":rotating_light:"
  };
  let description;
  switch (event) {
    case "started":
      description = `Pipeline started for: *${taskTitle}*`;
      break;
    case "tests_passed":
      description = `Tests passed for: *${taskTitle}*`;
      break;
    case "tests_failed":
      description = `Tests failed for: *${taskTitle}*`;
      break;
    case "review_approved":
      description = `Review approved for: *${taskTitle}*`;
      break;
    case "review_rejected":
      description = `Review rejected for: *${taskTitle}* (cycle ${state.reviewCycle + 1})`;
      break;
    case "completed":
      description = `Pipeline completed for: *${taskTitle}*
PR: ${state.prUrl ?? "N/A"}
Cost: ${cost}`;
      break;
    case "failed":
      description = `Pipeline failed for: *${taskTitle}*
Error: ${state.error ?? "Unknown"}
Cost: ${cost}`;
      break;
  }
  return {
    text: `${icons[event]} [X-Force ${pipelineId}] ${description}`
  };
}
1657
// Post an @-mention comment on the source GitHub issue for notable events:
// mentionOnFailure users are pinged on pipeline failure, mentionOnReview
// users when a PR is ready. No-op without GitHub config or a matching event;
// comment failures are logged, never thrown.
async function postGitHubMentions(config, owner, repo, issueNumber, event, state) {
  const github = config.github;
  if (!github) return;
  let message = "";
  if (event === "failed" && github.mentionOnFailure?.length) {
    const mentionList = github.mentionOnFailure.join(" ");
    message = `X-Force pipeline failed. ${mentionList} \u2014 please review.

**Error**: ${state.error ?? "Unknown"}`;
  }
  if ((event === "completed" || event === "review_approved") && github.mentionOnReview?.length) {
    const mentionList = github.mentionOnReview.join(" ");
    message = `X-Force pipeline completed. ${mentionList} \u2014 PR ready for review: ${state.prUrl ?? "N/A"}`;
  }
  if (!message) return;
  try {
    await addComment(owner, repo, issueNumber, message);
  } catch (error) {
    const msg = error instanceof Error ? error.message : String(error);
    log9.warn({ error: msg }, "Failed to post GitHub mention");
  }
}
1681
/**
 * Fan a pipeline event out to every configured notification channel.
 *
 * Slack (when a webhook URL is configured) and GitHub issue mentions run
 * concurrently; Promise.allSettled means one channel failing never blocks
 * or fails the other. A missing config is a silent no-op.
 *
 * @param {object} params - { config, state, event, owner, repo, issueNumber }.
 */
async function notify(params) {
  const { config, state, event, owner, repo, issueNumber } = params;
  if (!config) return;
  const deliveries = [];
  const webhookUrl = config.slack?.webhookUrl;
  if (webhookUrl) {
    deliveries.push(sendSlackWebhook(webhookUrl, formatSlackMessage(state, event)));
  }
  deliveries.push(postGitHubMentions(config, owner, repo, issueNumber, event, state));
  await Promise.allSettled(deliveries);
}
1692
+
1693
+ // src/pipeline/auto-merge.ts
1694
// Component logger for auto-merge decisions.
var log10 = createChildLogger("auto-merge");
// Ordinal ranking of task sizes so sizes can be compared with `>`.
var SIZE_ORDER = {
  xs: 1,
  s: 2,
  m: 3,
  l: 4,
  xl: 5
};
/**
 * Decide whether an approved PR may be merged automatically.
 *
 * A PR is eligible only when the repo enables auto-merge AND the task type is
 * allow-listed AND the task size does not exceed the configured maximum AND
 * (when required) the security scan has no critical/high findings.
 *
 * @param {object} params - { taskSpec, repoConfig, securityReport? }.
 * @returns {{eligible: boolean, reasons: string[]}} reasons is empty when eligible.
 */
function isAutoMergeEligible(params) {
  const { taskSpec, repoConfig, securityReport } = params;
  const rules = repoConfig.autoMergeRules;
  const reasons = [];
  // Repo-level kill switch short-circuits every other rule.
  if (!repoConfig.autoMerge) {
    return { eligible: false, reasons: ["Auto-merge is disabled for this repo"] };
  }
  if (!rules.types.includes(taskSpec.type)) {
    reasons.push(
      `Task type "${taskSpec.type}" is not in allowed types: [${rules.types.join(", ")}]`
    );
  }
  if (SIZE_ORDER[taskSpec.size] > SIZE_ORDER[rules.maxSize]) {
    reasons.push(`Task size "${taskSpec.size}" exceeds max size "${rules.maxSize}"`);
  }
  // The scan gate only applies when a report was actually produced.
  if (rules.requireCleanSecurityScan && securityReport) {
    const hasBlockingFinding = securityReport.findings.some(
      (finding) => finding.severity === "critical" || finding.severity === "high"
    );
    if (hasBlockingFinding) {
      reasons.push("Security scan found critical or high severity issues");
    }
  }
  const eligible = reasons.length === 0;
  if (!eligible) {
    log10.info({ reasons }, "PR not eligible for auto-merge");
  }
  return { eligible, reasons };
}
1730
+
1731
+ // src/tracking/cost-tracker.ts
1732
+ import { homedir } from "os";
1733
+ import { join as join2 } from "path";
1734
+ import { mkdir, appendFile, readFile } from "fs/promises";
1735
// Component logger for cost-tracking persistence.
var log11 = createChildLogger("cost-tracker");
// Default storage location: ~/.xforce/history.jsonl (one JSON record per line).
var HISTORY_DIR = join2(homedir(), ".xforce");
var HISTORY_FILE = "history.jsonl";
/**
 * Resolve the path of the run-history JSONL file.
 *
 * @param {string} [basePath] - Directory override; defaults to ~/.xforce.
 * @returns {string} Absolute path to the history file.
 */
function getHistoryPath(basePath) {
  const dir = basePath ?? HISTORY_DIR;
  return join2(dir, HISTORY_FILE);
}
1741
/**
 * Flatten a finished pipeline state into a serializable run record.
 *
 * Timestamps are normalized to Date first (they may arrive as strings after
 * JSON round-trips) and emitted as ISO strings; a missing completedAt falls
 * back to "now". Any non-"failed" terminal status is recorded as "completed".
 *
 * @param {object} state - Pipeline state; taskSpec must be populated.
 * @param {object} repoConfig - Reads owner, name and model.
 * @returns {object} Record suitable for one line of the history JSONL file.
 */
function buildRecordFromState(state, repoConfig) {
  const toDate = (value, fallback) => value instanceof Date ? value : new Date(value ?? fallback);
  const startedAt = toDate(state.startedAt, void 0);
  const completedAt = toDate(state.completedAt, Date.now());
  const spec = state.taskSpec;
  return {
    id: state.id,
    repo: `${repoConfig.owner}/${repoConfig.name}`,
    issueNumber: spec.issueNumber,
    issueUrl: spec.issueUrl,
    prNumber: state.prNumber,
    prUrl: state.prUrl,
    status: state.status === "failed" ? "failed" : "completed",
    totalCostUsd: state.totalCostUsd,
    durationMs: completedAt.getTime() - startedAt.getTime(),
    reviewCycles: state.reviewCycle,
    model: repoConfig.model,
    startedAt: startedAt.toISOString(),
    completedAt: completedAt.toISOString(),
    taskTitle: spec.title,
    taskType: spec.type,
    taskSize: spec.size,
    error: state.error
  };
}
1764
/**
 * Append one run record as a JSON line to the history file.
 *
 * Creates the target directory if needed. Uses append-only writes so
 * concurrent runs do not clobber each other's records.
 *
 * @param {object} record - Record produced by buildRecordFromState.
 * @param {string} [basePath] - Directory override; defaults to ~/.xforce.
 */
async function appendRecord(record, basePath) {
  const targetDir = basePath ?? HISTORY_DIR;
  await mkdir(targetDir, { recursive: true });
  await appendFile(join2(targetDir, HISTORY_FILE), JSON.stringify(record) + "\n", "utf-8");
  log11.debug({ id: record.id, repo: record.repo }, "Persisted pipeline run record");
}
1772
/**
 * Read run records from the history JSONL file, optionally filtered.
 *
 * A missing/unreadable file yields an empty list rather than an error.
 * Malformed lines are skipped with a warning so one bad write cannot
 * poison the whole history.
 *
 * @param {object} [filter] - { repo?, since?, until? }; since/until compare
 *   against each record's startedAt timestamp (inclusive bounds excluded).
 * @param {string} [basePath] - Directory override; defaults to ~/.xforce.
 * @returns {Promise<object[]>} Matching records in file order.
 */
async function readRecords(filter, basePath) {
  let raw;
  try {
    raw = await readFile(getHistoryPath(basePath), "utf-8");
  } catch {
    // No history yet (or unreadable) — treat as empty.
    return [];
  }
  const matches = (record) => {
    if (filter?.repo && record.repo !== filter.repo) return false;
    if (filter?.since && new Date(record.startedAt) < filter.since) return false;
    if (filter?.until && new Date(record.startedAt) > filter.until) return false;
    return true;
  };
  const records = [];
  for (const rawLine of raw.split("\n")) {
    const line = rawLine.trim();
    if (!line) continue;
    let record;
    try {
      record = JSON.parse(line);
    } catch {
      log11.warn("Skipping malformed line in history file");
      continue;
    }
    if (matches(record)) {
      records.push(record);
    }
  }
  return records;
}
1796
/**
 * Aggregate run records into cost/duration totals and per-repo breakdowns.
 *
 * @param {object[]} records - Records from readRecords.
 * @returns {object} Totals, success/failure counts, per-run averages, and a
 *   repo → total-cost map. All fields are zero/empty for an empty input.
 */
function summarize(records) {
  const summary = {
    totalCostUsd: 0,
    totalRuns: records.length,
    successfulRuns: 0,
    failedRuns: 0,
    avgCostPerRun: 0,
    avgDurationMs: 0,
    costByRepo: {}
  };
  if (records.length === 0) {
    // Explicit zero summary avoids dividing by zero below.
    return { ...summary, totalRuns: 0 };
  }
  let totalDurationMs = 0;
  for (const record of records) {
    summary.totalCostUsd += record.totalCostUsd;
    totalDurationMs += record.durationMs;
    if (record.status === "completed") {
      summary.successfulRuns += 1;
    }
    if (record.status === "failed") {
      summary.failedRuns += 1;
    }
    summary.costByRepo[record.repo] = (summary.costByRepo[record.repo] ?? 0) + record.totalCostUsd;
  }
  summary.avgCostPerRun = summary.totalCostUsd / records.length;
  summary.avgDurationMs = totalDurationMs / records.length;
  return summary;
}
1826
+
1827
+ // src/pipeline/orchestrator.ts
1828
// Component logger for the pipeline orchestrator.
var log12 = createChildLogger("orchestrator");
/**
 * Move the pipeline state machine to a new status.
 *
 * Validates the move first (validateTransition throws on illegal moves, so an
 * invalid transition leaves the state untouched), then records the change in
 * the state's audit log and the component logger.
 *
 * @param {object} state - Pipeline state; status and logs are mutated.
 * @param {string} nextStatus - Target status.
 * @param {string} note - Human-readable reason for the transition.
 */
function transition(state, nextStatus, note) {
  validateTransition(state.status, nextStatus);
  state.status = nextStatus;
  const entry = { timestamp: /* @__PURE__ */ new Date(), status: nextStatus, message: note };
  state.logs.push(entry);
  log12.info({ status: nextStatus }, note);
}
1835
/**
 * Run the full issue-to-PR pipeline for a single GitHub issue.
 *
 * Flow: resolve the target issue → label it in-progress → parse the issue
 * body into a task spec → set up a work branch → (optionally) plan →
 * delegate to codeReviewLoop (code / verify / review, under a timeout) →
 * label done and comment on success. On any failure the issue is labeled
 * failed and an error comment is posted. A run record is persisted in both
 * outcomes, and the working directory / local branch is cleaned up in the
 * `finally` block. This function returns the final state in both the success
 * and failure paths; it only throws for invalid parameters.
 *
 * @param {object} params - { config, issueUrl? } or
 *   { config, repoOwner, repoName, issueNumber }, plus optional localDir.
 * @returns {Promise<object>} Final pipeline state (status "completed",
 *   "awaiting_human" or "failed").
 * @throws {PipelineError} When neither issueUrl nor the owner/name/number
 *   triple is supplied.
 */
async function runPipeline(params) {
  const { config } = params;
  let owner;
  let repo;
  let issueNumber;
  // Accept either a full issue URL or the explicit owner/name/number triple.
  if (params.issueUrl) {
    const parsed = parseIssueUrl(params.issueUrl);
    owner = parsed.owner;
    repo = parsed.repo;
    issueNumber = parsed.issueNumber;
  } else if (params.repoOwner && params.repoName && params.issueNumber) {
    owner = params.repoOwner;
    repo = params.repoName;
    issueNumber = params.issueNumber;
  } else {
    throw new PipelineError("Either issueUrl or (repoOwner + repoName + issueNumber) is required");
  }
  const repoConfig = resolveRepoConfig(config, owner, repo);
  // Fresh state object; taskSpec is filled in after the issue is parsed.
  const state = {
    id: nanoid(12),
    taskSpec: null,
    status: "parsing_issue",
    branchName: "",
    reviewCycle: 0,
    testRetry: 0,
    totalCostUsd: 0,
    logs: [{ timestamp: /* @__PURE__ */ new Date(), status: "parsing_issue", message: "Pipeline started" }],
    startedAt: /* @__PURE__ */ new Date()
  };
  let workDir = null;
  let isLocal = false;
  let branchGit = null;
  const timeoutMs = repoConfig.timeoutMinutes * 60 * 1e3;
  try {
    // Clear stale outcome labels from earlier runs; ignore failures (the
    // labels may simply not be present).
    await removeLabel(owner, repo, issueNumber, repoConfig.labels.done).catch(() => {
    });
    await removeLabel(owner, repo, issueNumber, repoConfig.labels.failed).catch(() => {
    });
    await addLabel(owner, repo, issueNumber, repoConfig.labels.inProgress);
    log12.info({ owner, repo, issueNumber }, "Fetching and parsing issue");
    const issue = await getIssue(owner, repo, issueNumber);
    const labels = await getIssueLabels(owner, repo, issueNumber);
    const taskSpec = parseIssueBody({
      title: issue.title,
      body: issue.body ?? "",
      labels,
      issueNumber,
      issueUrl: issue.html_url,
      repoOwner: owner,
      repoName: repo
    });
    state.taskSpec = taskSpec;
    await notify({ config: config.notifications, state, event: "started", owner, repo, issueNumber });
    transition(state, "creating_branch", "Setting up repository and branch");
    // setupBranch either clones into a temp dir or reuses a local checkout
    // (isLocal); the finally block below branches on that distinction.
    const branchResult = await setupBranch({
      owner,
      repo,
      defaultBranch: repoConfig.defaultBranch,
      issueNumber,
      issueTitle: taskSpec.title,
      branchPrefix: repoConfig.branchPrefix,
      localDir: params.localDir ?? repoConfig.localPath
    });
    const { branchName, git } = branchResult;
    workDir = branchResult.workDir;
    isLocal = branchResult.isLocal;
    branchGit = git;
    state.branchName = branchName;
    let plan;
    // Optional planning phase: produces an implementation plan, adds its cost
    // to the running total, and posts the plan to the issue.
    if (repoConfig.enablePlanning) {
      transition(state, "planning", `Planning with ${repoConfig.plannerModel}`);
      const planSpinner = ora(`Planning with ${repoConfig.plannerModel}`).start();
      const planningResult = await runPlanningAgent({
        taskSpec,
        repoConfig,
        workingDir: workDir,
        onProgress: (msg) => {
          planSpinner.text = `Planning: ${msg}`;
        }
      });
      plan = planningResult.plan;
      state.plan = plan;
      state.totalCostUsd += planningResult.costUsd;
      planSpinner.succeed(`Plan created (${plan.implementationSteps.length} steps, ${plan.estimatedComplexity} complexity, $${planningResult.costUsd.toFixed(2)})`);
      await addComment(owner, repo, issueNumber, formatPlanComment(plan));
    }
    // The code/verify/review loop runs under the repo-configured timeout.
    await withTimeout(
      codeReviewLoop(state, taskSpec, repoConfig, git, workDir, owner, repo, issueNumber, config.notifications, plan),
      timeoutMs,
      "Pipeline"
    );
    // Success path: swap labels and post a summary comment on the issue.
    await removeLabel(owner, repo, issueNumber, repoConfig.labels.inProgress);
    await addLabel(owner, repo, issueNumber, repoConfig.labels.done);
    await addComment(
      owner,
      repo,
      issueNumber,
      `X-Force pipeline completed.

- **PR**: ${state.prUrl}
- **Cost**: $${state.totalCostUsd.toFixed(4)}
- **Review cycles**: ${state.reviewCycle}`
    );
    await notify({ config: config.notifications, state, event: "completed", owner, repo, issueNumber });
    state.completedAt = /* @__PURE__ */ new Date();
    // Persistence is best-effort: a history write failure must not fail the run.
    await appendRecord(buildRecordFromState(state, repoConfig)).catch((err) => {
      log12.warn({ error: err.message }, "Failed to persist cost tracking record");
    });
    // codeReviewLoop leaves status "merging" when it auto-merged the PR;
    // otherwise the PR waits for a human (unless already marked as such).
    if (state.status === "merging") {
      transition(state, "completed", "PR auto-merged");
    } else if (state.status !== "awaiting_human") {
      transition(state, "awaiting_human", "PR ready for human review");
    }
    return state;
  } catch (error) {
    // Failure path: record the error, notify, persist, and mark the issue
    // failed. Note: state.status is set directly (not via transition()) so a
    // failure can be recorded from any intermediate status.
    const message = error instanceof Error ? error.message : String(error);
    state.status = "failed";
    state.error = message;
    state.completedAt = /* @__PURE__ */ new Date();
    log12.error({ error: message }, "Pipeline failed");
    await notify({ config: config.notifications, state, event: "failed", owner, repo, issueNumber }).catch(() => {
    });
    await appendRecord(buildRecordFromState(state, repoConfig)).catch(() => {
    });
    try {
      await removeLabel(owner, repo, issueNumber, repoConfig.labels.inProgress);
      await addLabel(owner, repo, issueNumber, repoConfig.labels.failed);
      await addComment(
        owner,
        repo,
        issueNumber,
        `X-Force pipeline failed.

**Error**: ${message}
**Cost**: $${state.totalCostUsd.toFixed(4)}`
      );
    } catch {
      log12.warn("Failed to update issue labels/comments after pipeline failure");
    }
    // The failed state is returned, not rethrown, so callers always get a state.
    return state;
  } finally {
    if (isLocal && branchGit) {
      // Local checkout: switch the user's repo back to the default branch.
      await restoreDefaultBranch(branchGit, repoConfig.defaultBranch);
    } else if (workDir) {
      // Temp clone: delete the working directory; best-effort.
      try {
        await rm(workDir, { recursive: true, force: true });
      } catch {
        log12.warn({ workDir }, "Failed to clean up working directory");
      }
    }
  }
}
1987
/**
 * Inner code → verify → review loop of the pipeline.
 *
 * Each cycle: run the coding agent, commit/push, run the configured
 * verification checks (lint → build → tests → run command, with up to
 * maxTestRetries fix-and-retry rounds), create or update the PR, then run the
 * AI reviewer (and, when enabled, the security scanner) on the PR diff.
 * An approved review returns (auto-merging when eligible); a rejected review
 * feeds its feedback into the next cycle. Throws when the checks never pass
 * or the review is never approved within maxReviewCycles.
 *
 * NOTE(review): notificationsConfig is accepted but not referenced anywhere
 * in this body — presumably reserved for future per-event notifications.
 *
 * @throws {PipelineError} No changes on the first cycle, checks exhausted,
 *   or review not approved within the cycle budget.
 */
async function codeReviewLoop(state, taskSpec, repoConfig, git, workDir, owner, repo, issueNumber, notificationsConfig, plan) {
  let previousReview;
  // Claude session id, threaded through agent calls so each invocation resumes
  // the same conversation.
  let sessionId;
  for (let cycle = 0; cycle <= repoConfig.maxReviewCycles; cycle++) {
    state.reviewCycle = cycle;
    transition(state, "coding", `Coding with ${repoConfig.model} (cycle ${cycle + 1})`);
    const codeSpinner = ora(`Coding with ${repoConfig.model} (cycle ${cycle + 1})`).start();
    const codingResult = await runCodingAgent({
      taskSpec,
      repoConfig,
      workingDir: workDir,
      previousReview,
      sessionId,
      // The plan only seeds the first cycle; later cycles are driven by review feedback.
      plan: cycle === 0 ? plan : void 0,
      onProgress: (msg) => {
        codeSpinner.text = `Coding: ${msg}`;
      }
    });
    state.totalCostUsd += codingResult.costUsd;
    sessionId = codingResult.sessionId;
    codeSpinner.succeed(`Coding complete (${codingResult.numTurns} turns, $${codingResult.costUsd.toFixed(2)})`);
    const commitSpinner = ora("Committing and pushing changes").start();
    const sha = await commitAndPush({
      git,
      branchName: state.branchName,
      message: `feat: ${taskSpec.title} (xforce cycle ${cycle + 1})`
    });
    // A no-op first cycle is fatal; later cycles may legitimately produce no
    // new commit (e.g. review feedback required no file changes).
    if (!sha && cycle === 0) {
      commitSpinner.fail("No changes to commit");
      throw new PipelineError("Coding agent made no changes");
    }
    commitSpinner.succeed(sha ? `Committed ${sha.slice(0, 7)}` : "No new changes");
    transition(state, "running_tests", "Running verification checks");
    let allChecksPassed = false;
    // Verification checks with fix-and-retry: the first failing check short-
    // circuits the rest (each stage is guarded by !failedCheck) and its output
    // is handed back to the coding agent.
    for (let retry = 0; retry <= repoConfig.maxTestRetries; retry++) {
      state.testRetry = retry;
      let failedCheck = null;
      const retryLabel = retry > 0 ? ` (retry ${retry})` : "";
      if (repoConfig.lintCommand && !failedCheck) {
        const lintSpinner = ora(`Running lint${retryLabel}`).start();
        const lintResult = await runCommand({
          workingDir: workDir,
          command: repoConfig.lintCommand,
          kind: "lint"
        });
        if (lintResult.passed) {
          lintSpinner.succeed(`Lint passed (${(lintResult.durationMs / 1e3).toFixed(1)}s)`);
        } else {
          lintSpinner.fail("Lint failed");
          failedCheck = { kind: "lint", output: lintResult.output };
        }
      }
      if (repoConfig.buildCommand && !failedCheck) {
        const buildSpinner = ora(`Running build${retryLabel}`).start();
        const buildResult = await runCommand({
          workingDir: workDir,
          command: repoConfig.buildCommand,
          kind: "build"
        });
        if (buildResult.passed) {
          buildSpinner.succeed(`Build passed (${(buildResult.durationMs / 1e3).toFixed(1)}s)`);
        } else {
          buildSpinner.fail("Build failed");
          failedCheck = { kind: "build", output: buildResult.output };
        }
      }
      // Tests always run (no config guard), unlike lint/build/run.
      if (!failedCheck) {
        const testSpinner = ora(`Running tests${retryLabel}`).start();
        const testResult = await runTests({
          workingDir: workDir,
          testCommand: repoConfig.testCommand
        });
        if (testResult.passed) {
          testSpinner.succeed(`Tests passed (${(testResult.durationMs / 1e3).toFixed(1)}s)`);
        } else {
          testSpinner.fail("Tests failed");
          failedCheck = { kind: "test", output: testResult.output };
        }
      }
      if (repoConfig.runCommand && !failedCheck) {
        const runSpinner = ora(`Running verification${retryLabel}`).start();
        const runResult = await runCommand({
          workingDir: workDir,
          command: repoConfig.runCommand,
          kind: "run"
        });
        if (runResult.passed) {
          runSpinner.succeed(`Verification passed (${(runResult.durationMs / 1e3).toFixed(1)}s)`);
        } else {
          runSpinner.fail("Verification failed");
          failedCheck = { kind: "run", output: runResult.output };
        }
      }
      if (!failedCheck) {
        allChecksPassed = true;
        break;
      }
      // Budget remaining: hand the failure output to the coding agent, commit
      // the fix, and loop to re-run the checks.
      if (retry < repoConfig.maxTestRetries) {
        transition(state, "coding", `Fixing ${failedCheck.kind} failures (retry ${retry + 1})`);
        const fixSpinner = ora(`Fixing ${failedCheck.kind} failures (retry ${retry + 1})`).start();
        const feedback = formatCommandFeedback(failedCheck.kind, failedCheck.output);
        const fixResult = await runCodingAgent({
          taskSpec,
          repoConfig,
          workingDir: workDir,
          // Test failures use the dedicated testFailures channel; other check
          // kinds are passed as generic command failures.
          ...failedCheck.kind === "test" ? { testFailures: feedback } : { commandFailures: { kind: failedCheck.kind, output: failedCheck.output } },
          sessionId,
          onProgress: (msg) => {
            fixSpinner.text = `Fixing ${failedCheck.kind}: ${msg}`;
          }
        });
        state.totalCostUsd += fixResult.costUsd;
        sessionId = fixResult.sessionId;
        fixSpinner.succeed(`Fix applied (${fixResult.numTurns} turns, $${fixResult.costUsd.toFixed(2)})`);
        await commitAndPush({
          git,
          branchName: state.branchName,
          message: `fix: address ${failedCheck.kind} failures (xforce retry ${retry + 1})`
        });
        transition(state, "running_tests", `Re-running verification checks (retry ${retry + 1})`);
      }
    }
    if (!allChecksPassed) {
      throw new PipelineError(
        `Verification checks still failing after ${repoConfig.maxTestRetries} retries`
      );
    }
    // First cycle creates the PR; later cycles refresh its title/body.
    if (!state.prNumber) {
      const { prNumber, prUrl } = await createPullRequest({
        owner,
        repo,
        branchName: state.branchName,
        defaultBranch: repoConfig.defaultBranch,
        taskSpec,
        pipeline: state
      });
      state.prNumber = prNumber;
      state.prUrl = prUrl;
    } else {
      await updatePullRequest({
        owner,
        repo,
        prNumber: state.prNumber,
        taskSpec,
        pipeline: state
      });
    }
    transition(state, "reviewing", `Reviewing with ${repoConfig.reviewerModel} (cycle ${cycle + 1})`);
    const reviewSpinner = ora(`Reviewing with ${repoConfig.reviewerModel} (cycle ${cycle + 1})`).start();
    const diff = await getPRDiff(owner, repo, state.prNumber);
    // Reviewer and (optional) security scanner run concurrently on the same diff.
    const [review, securityResult] = await Promise.all([
      runReviewerAgent({ taskSpec, diff, repoConfig, reviewCycle: cycle }),
      repoConfig.enableSecurityScan ? runSecurityScanner({ taskSpec, diff, repoConfig }) : Promise.resolve(void 0)
    ]);
    if (securityResult) {
      state.totalCostUsd += securityResult.costUsd;
    }
    const reviewStatus = review.approved ? "Approved" : `Changes requested (${review.issues.length} issues)`;
    reviewSpinner.succeed(`Review: ${reviewStatus}`);
    await commentOnPR(
      owner,
      repo,
      state.prNumber,
      formatReviewComment(review, cycle)
    );
    // Only post a security comment when the scan found something.
    if (securityResult && securityResult.report.findings.length > 0) {
      await commentOnPR(
        owner,
        repo,
        state.prNumber,
        formatSecurityComment(securityResult.report)
      );
    }
    if (review.approved) {
      await labelPR(owner, repo, state.prNumber, repoConfig.labels.done);
      const autoMergeCheck = isAutoMergeEligible({
        taskSpec,
        repoConfig,
        securityReport: securityResult?.report
      });
      if (autoMergeCheck.eligible) {
        transition(state, "merging", "Auto-merging approved PR");
        const mergeSpinner = ora("Auto-merging approved PR").start();
        const mergeResult = await mergePR({
          owner,
          repo,
          prNumber: state.prNumber,
          strategy: repoConfig.autoMergeRules.mergeStrategy,
          commitTitle: `${taskSpec.title} (#${state.prNumber})`
        });
        if (mergeResult.merged) {
          // Status stays "merging" here; runPipeline promotes it to "completed".
          mergeSpinner.succeed(`PR auto-merged (${mergeResult.sha?.slice(0, 7)})`);
          await addComment(
            owner,
            repo,
            issueNumber,
            `X-Force auto-merged PR #${state.prNumber} (${repoConfig.autoMergeRules.mergeStrategy}).`
          );
        } else {
          // Merge refused (e.g. by branch protection): fall back to human review.
          mergeSpinner.fail(`Auto-merge failed: ${mergeResult.error}`);
          await commentOnPR(
            owner,
            repo,
            state.prNumber,
            `Auto-merge failed: ${mergeResult.error}

This PR requires manual merge.`
          );
          transition(state, "awaiting_human", "Auto-merge failed, awaiting human review");
        }
      }
      return;
    }
    // Rejected: carry the review into the next cycle (unless the budget is spent).
    if (cycle < repoConfig.maxReviewCycles) {
      previousReview = review;
      transition(state, "addressing_review", `Addressing review feedback (cycle ${cycle + 1})`);
    }
  }
  throw new PipelineError(
    `Review not approved after ${repoConfig.maxReviewCycles} cycles`
  );
}
2209
/**
 * Render a security-scan report as a Markdown PR comment.
 *
 * Emits the risk level, summary, a findings table (file:line when a line is
 * known), and a recommendations list only when recommendations exist.
 *
 * @param {object} report - { riskLevel, summary, findings[], recommendations[] }.
 * @returns {string} Markdown comment body.
 */
function formatSecurityComment(report) {
  const rows = [];
  for (const f of report.findings) {
    const location = `${f.file}${f.line ? ":" + f.line : ""}`;
    rows.push(`| ${f.severity} | ${f.category} | \`${location}\` | ${f.description} |`);
  }
  const findingsTable = rows.join("\n");
  const recommendationsSection = report.recommendations.length > 0 ? `### Recommendations
${report.recommendations.map((r) => `- ${r}`).join("\n")}` : "";
  return `## X-Force Security Scan - Risk Level: ${report.riskLevel.toUpperCase()}

### Summary
${report.summary}

### Findings
| Severity | Category | File | Description |
|----------|----------|------|-------------|
${findingsTable}

${recommendationsSection}

---
*Security scan by X-Force AI Pipeline*`;
}
2229
/**
 * Render an implementation plan as a Markdown issue comment.
 *
 * Steps are listed in ascending `order`; empty file/risk lists fall back to
 * "None" / "None identified".
 *
 * Fix: the original sorted `plan.implementationSteps` in place, silently
 * reordering the caller's array (Array#sort mutates). We now sort a copy.
 *
 * @param {object} plan - { approach, estimatedComplexity, filesToModify[],
 *   filesToCreate[], implementationSteps[], risks[] }.
 * @returns {string} Markdown comment body.
 */
function formatPlanComment(plan) {
  // Copy before sorting — sort() mutates and `plan` belongs to the caller.
  const steps = [...plan.implementationSteps].sort((a, b) => a.order - b.order).map(
    (s) => `${s.order}. **${s.description}**
Files: ${s.files.map((f) => `\`${f}\``).join(", ")}`
  ).join("\n");
  return `## X-Force Implementation Plan

### Approach
${plan.approach}

### Estimated Complexity: ${plan.estimatedComplexity}

### Files to Modify
${plan.filesToModify.map((f) => `- \`${f}\``).join("\n") || "None"}

### Files to Create
${plan.filesToCreate.map((f) => `- \`${f}\``).join("\n") || "None"}

### Implementation Steps
${steps}

### Risks
${plan.risks.map((r) => `- ${r}`).join("\n") || "None identified"}

---
*Plan generated by X-Force AI Pipeline*`;
}
2256
/**
 * Render an AI review result as a Markdown PR comment.
 *
 * Shows the approval status for the given (0-based) cycle, the review
 * summary, an issues table when issues exist, spec adherence, and a security
 * concerns section only when concerns were raised.
 *
 * @param {object} review - { approved, summary, issues[], specAdherence,
 *   securityConcerns[] }.
 * @param {number} cycle - Zero-based review cycle; displayed as cycle + 1.
 * @returns {string} Markdown comment body.
 */
function formatReviewComment(review, cycle) {
  const status = review.approved ? "APPROVED" : "CHANGES REQUESTED";
  const emoji = review.approved ? "approved" : "requesting changes";
  let issueTable = "";
  if (review.issues.length > 0) {
    const rows = review.issues.map(
      (i) => `| ${i.severity} | \`${i.file}${i.line ? ":" + i.line : ""}\` | ${i.description} |`
    );
    issueTable = `### Issues Found
| Severity | File | Description |
|----------|------|-------------|
${rows.join("\n")}
`;
  }
  const securitySection = review.securityConcerns.length > 0 ? `### Security Concerns
${review.securityConcerns.map((c) => `- ${c}`).join("\n")}` : "";
  return `## X-Force AI Review (Cycle ${cycle + 1}) - ${status}

### Summary
${review.summary}

${issueTable}
### Spec Adherence
**Met**: ${review.specAdherence.met.join(", ") || "None yet"}
**Unmet**: ${review.specAdherence.unmet.join(", ") || "All met"}

${securitySection}

---
*Automated review by X-Force (${emoji})*`;
}
2280
+
2281
+ // src/server/webhook.ts
2282
+ import { createServer } from "http";
2283
+ import crypto from "crypto";
2284
+
2285
+ // src/server/queue.ts
2286
+ import { nanoid as nanoid2 } from "nanoid";
2287
// Component logger for queue lifecycle events.
var log13 = createChildLogger("queue");
// Fallback limits applied when the constructor receives no overrides.
var DEFAULT_OPTIONS = {
  maxSize: 10,
  historySize: 50
};
/**
 * In-memory FIFO job queue that runs pipeline jobs strictly one at a time.
 *
 * Jobs are identified by (owner, repo, issueNumber); duplicates of a pending
 * or active job are rejected at enqueue time. Finished jobs are kept in a
 * bounded most-recent-first history. Processing is driven by processNext(),
 * which re-triggers itself after each job, and the `processing` flag prevents
 * concurrent job execution. All state lives in this process — nothing is
 * persisted across restarts.
 */
var JobQueue = class {
  // Jobs waiting to run, in arrival order.
  pending = [];
  // Job currently being processed, or null when idle.
  active = null;
  // Finished jobs, most recent first, capped at options.historySize.
  completed = [];
  // Count of jobs that reached a terminal state (completed or failed).
  totalProcessed = 0;
  // Re-entrancy guard: true while processNext() is executing a job.
  processing = false;
  options;
  // Async callback that actually runs a job and returns its result.
  processor;
  constructor(processor, options) {
    this.processor = processor;
    this.options = { ...DEFAULT_OPTIONS, ...options };
  }
  /**
   * Add a job and kick off processing. Throws when the queue is full or the
   * same issue is already pending/active. Returns the created job record.
   */
  enqueue(params) {
    if (this.pending.length >= this.options.maxSize) {
      throw new Error(`Queue is full (max ${this.options.maxSize})`);
    }
    // Duplicate = same owner/repo/issue already pending OR currently active.
    const isDuplicate = this.isDuplicateIn(this.pending, params) || this.active && this.active.owner === params.owner && this.active.repo === params.repo && this.active.issueNumber === params.issueNumber;
    if (isDuplicate) {
      throw new Error(
        `Issue ${params.owner}/${params.repo}#${params.issueNumber} is already queued or active`
      );
    }
    const job = {
      id: nanoid2(12),
      owner: params.owner,
      repo: params.repo,
      issueNumber: params.issueNumber,
      issueUrl: params.issueUrl,
      status: "pending",
      enqueuedAt: /* @__PURE__ */ new Date()
    };
    this.pending.push(job);
    log13.info(
      { jobId: job.id, owner: job.owner, repo: job.repo, issue: job.issueNumber },
      "Job enqueued"
    );
    // Fire-and-forget: processNext() is a no-op when a job is already running.
    void this.processNext();
    return job;
  }
  /** Snapshot of the queue (pending/completed are defensive copies). */
  getStatus() {
    return {
      active: this.active,
      pending: [...this.pending],
      completed: [...this.completed],
      totalProcessed: this.totalProcessed
    };
  }
  /** Look a job up by id across active, pending, and completed. */
  getJob(id) {
    if (this.active?.id === id) return this.active;
    return this.pending.find((j) => j.id === id) ?? this.completed.find((j) => j.id === id);
  }
  // Number of pending (not yet started) jobs.
  get size() {
    return this.pending.length;
  }
  // True when no further jobs can be enqueued.
  get isFull() {
    return this.pending.length >= this.options.maxSize;
  }
  /** True when `jobs` already contains an entry for the same issue. */
  isDuplicateIn(jobs, params) {
    return jobs.some(
      (j) => j.owner === params.owner && j.repo === params.repo && j.issueNumber === params.issueNumber
    );
  }
  /**
   * Run the next pending job, if idle. Serializes execution via the
   * `processing` flag and chains to the next job from its finally block.
   */
  async processNext() {
    if (this.processing || this.pending.length === 0) return;
    this.processing = true;
    const job = this.pending.shift();
    job.status = "running";
    job.startedAt = /* @__PURE__ */ new Date();
    this.active = job;
    log13.info(
      { jobId: job.id, owner: job.owner, repo: job.repo, issue: job.issueNumber },
      "Processing job"
    );
    try {
      const result = await this.processor(job);
      job.status = "completed";
      job.result = result;
    } catch (error) {
      job.status = "failed";
      job.error = error instanceof Error ? error.message : String(error);
      log13.error(
        { jobId: job.id, error: job.error },
        "Job failed"
      );
    } finally {
      // Bookkeeping runs for success and failure alike: archive the job,
      // trim history, release the lock, then pull the next pending job.
      job.completedAt = /* @__PURE__ */ new Date();
      this.active = null;
      this.totalProcessed++;
      this.completed.unshift(job);
      if (this.completed.length > this.options.historySize) {
        this.completed = this.completed.slice(0, this.options.historySize);
      }
      this.processing = false;
      log13.info(
        { jobId: job.id, status: job.status, totalProcessed: this.totalProcessed },
        "Job finished"
      );
      void this.processNext();
    }
  }
};
2393
+
2394
+ // src/server/webhook.ts
2395
// Component logger for the webhook HTTP server.
var log14 = createChildLogger("webhook-server");
// Hard cap on accepted request bodies: 1 MiB.
var MAX_BODY_SIZE = 1048576;
/**
 * Read an incoming HTTP request body into a single Buffer.
 *
 * Rejects with "Payload too large" and destroys the connection as soon as
 * the accumulated size exceeds MAX_BODY_SIZE, so oversized uploads are cut
 * off mid-stream rather than buffered.
 *
 * @param {import("http").IncomingMessage} req - Request stream to drain.
 * @returns {Promise<Buffer>} The concatenated body bytes.
 */
function readBody(req) {
  return new Promise((fulfill, fail) => {
    const pieces = [];
    let received = 0;
    req.on("data", (piece) => {
      received += piece.length;
      if (received > MAX_BODY_SIZE) {
        req.destroy();
        fail(new Error("Payload too large"));
        return;
      }
      pieces.push(piece);
    });
    req.on("end", () => fulfill(Buffer.concat(pieces)));
    req.on("error", fail);
  });
}
2414
/**
 * Verify a GitHub webhook HMAC signature ("sha256=<hex>") for a payload.
 *
 * Comparison uses crypto.timingSafeEqual to avoid timing side channels; the
 * up-front length check is required because timingSafeEqual throws on
 * different-length inputs (it also makes mismatched lengths fail cleanly).
 *
 * @param {Buffer} payload - Raw request body bytes.
 * @param {string} signature - Value of the X-Hub-Signature-256 header.
 * @param {string} secret - Shared webhook secret.
 * @returns {boolean} True when the signature matches.
 */
function verifySignature(payload, signature, secret) {
  const digest = crypto.createHmac("sha256", secret).update(payload).digest("hex");
  const expected = `sha256=${digest}`;
  if (expected.length !== signature.length) {
    return false;
  }
  return crypto.timingSafeEqual(Buffer.from(expected), Buffer.from(signature));
}
2419
/**
 * Finish an HTTP response with a JSON body and the given status code.
 *
 * @param {import("http").ServerResponse} res - Response to write to.
 * @param {number} statusCode - HTTP status code.
 * @param {object} body - Value serialized with JSON.stringify.
 */
function sendJson(res, statusCode, body) {
  const headers = { "Content-Type": "application/json" };
  res.writeHead(statusCode, headers);
  res.end(JSON.stringify(body));
}
2423
+ function createWebhookServer(config, options) {
2424
+ const startTime = Date.now();
2425
+ const readyLabel = config.defaults.labels.ready;
2426
+ const queue = new JobQueue(
2427
+ async (job) => {
2428
+ const repoConf = config.repos.find((r) => r.owner === job.owner && r.name === job.repo);
2429
+ return runPipeline({
2430
+ repoOwner: job.owner,
2431
+ repoName: job.repo,
2432
+ issueNumber: job.issueNumber,
2433
+ config,
2434
+ localDir: repoConf?.localPath
2435
+ });
2436
+ },
2437
+ {
2438
+ maxSize: options.maxQueueSize ?? 10,
2439
+ historySize: options.historySize ?? 50
2440
+ }
2441
+ );
2442
+ function handleHealth(_req, res) {
2443
+ sendJson(res, 200, {
2444
+ status: "ok",
2445
+ uptime: Math.floor((Date.now() - startTime) / 1e3)
2446
+ });
2447
+ }
2448
+ function handleStatus(_req, res) {
2449
+ sendJson(res, 200, queue.getStatus());
2450
+ }
2451
+ async function handleWebhook(req, res) {
2452
+ let body;
2453
+ try {
2454
+ body = await readBody(req);
2455
+ } catch {
2456
+ sendJson(res, 413, { error: "Payload too large" });
2457
+ return;
2458
+ }
2459
+ const signature = req.headers["x-hub-signature-256"];
2460
+ if (!signature) {
2461
+ sendJson(res, 401, { error: "Missing signature" });
2462
+ return;
2463
+ }
2464
+ if (!verifySignature(body, signature, options.secret)) {
2465
+ sendJson(res, 401, { error: "Invalid signature" });
2466
+ return;
2467
+ }
2468
+ let payload;
2469
+ try {
2470
+ payload = JSON.parse(body.toString("utf-8"));
2471
+ } catch {
2472
+ sendJson(res, 400, { error: "Invalid JSON" });
2473
+ return;
2474
+ }
2475
+ const event = req.headers["x-github-event"];
2476
+ if (event !== "issues") {
2477
+ sendJson(res, 200, { ignored: true, reason: `Event type "${event}" not handled` });
2478
+ return;
2479
+ }
2480
+ if (payload.action === "labeled") {
2481
+ const labelName = payload.label?.name;
2482
+ if (labelName !== readyLabel) {
2483
+ sendJson(res, 200, { ignored: true, reason: `Label "${labelName}" does not match ready label "${readyLabel}"` });
2484
+ return;
2485
+ }
2486
+ } else if (payload.action === "reopened") {
2487
+ const labels = payload.issue?.labels;
2488
+ const hasReadyLabel = labels?.some((l) => l.name === readyLabel);
2489
+ if (!hasReadyLabel) {
2490
+ sendJson(res, 200, { ignored: true, reason: `Reopened issue does not have "${readyLabel}" label` });
2491
+ return;
2492
+ }
2493
+ } else {
2494
+ sendJson(res, 200, { ignored: true, reason: `Action "${payload.action}" not handled` });
2495
+ return;
2496
+ }
2497
+ const owner = payload.repository?.owner?.login;
2498
+ const repo = payload.repository?.name;
2499
+ const issueNumber = payload.issue?.number;
2500
+ const issueUrl = payload.issue?.html_url;
2501
+ if (!owner || !repo || !issueNumber || !issueUrl) {
2502
+ sendJson(res, 400, { error: "Missing required fields in payload" });
2503
+ return;
2504
+ }
2505
+ const repoConfigured = config.repos.some((r) => r.owner === owner && r.name === repo);
2506
+ if (!repoConfigured) {
2507
+ sendJson(res, 422, { error: `Repository ${owner}/${repo} not configured` });
2508
+ return;
2509
+ }
2510
+ try {
2511
+ const job = queue.enqueue({ owner, repo, issueNumber, issueUrl });
2512
+ log14.info(
2513
+ { jobId: job.id, owner, repo, issueNumber },
2514
+ "Webhook accepted, job enqueued"
2515
+ );
2516
+ sendJson(res, 202, { accepted: true, jobId: job.id, position: queue.size });
2517
+ } catch (error) {
2518
+ const message = error instanceof Error ? error.message : String(error);
2519
+ if (message.includes("already queued")) {
2520
+ sendJson(res, 409, { error: message });
2521
+ } else if (message.includes("Queue is full")) {
2522
+ sendJson(res, 503, { error: message });
2523
+ } else {
2524
+ sendJson(res, 500, { error: message });
2525
+ }
2526
+ }
2527
+ }
2528
// HTTP entry point. Routes the three known endpoints and converts any
// handler failure into a 500, unless the handler already started a response.
const server = createServer(async (req, res) => {
  const method = req.method ?? "GET";
  const url = req.url ?? "/";
  log14.debug({ method, url }, "Request received");
  try {
    // Dispatch on the combined "METHOD path" key; default is a 404.
    switch (`${method} ${url}`) {
      case "GET /health":
        handleHealth(req, res);
        break;
      case "GET /status":
        handleStatus(req, res);
        break;
      case "POST /webhook":
        await handleWebhook(req, res);
        break;
      default:
        sendJson(res, 404, { error: "Not found" });
    }
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    log14.error({ error: message }, "Request handler error");
    // Only write the error response if the handler has not begun one.
    if (!res.headersSent) {
      sendJson(res, 500, { error: "Internal server error" });
    }
  }
});
2550
+ return {
2551
+ start() {
2552
+ return new Promise((resolve2, reject) => {
2553
+ server.listen(options.port, options.host, () => {
2554
+ log14.info({ port: options.port, host: options.host }, "Webhook server started");
2555
+ resolve2();
2556
+ });
2557
+ server.on("error", reject);
2558
+ });
2559
+ },
2560
+ stop() {
2561
+ return new Promise((resolve2) => {
2562
+ server.close(() => {
2563
+ log14.info("Webhook server stopped");
2564
+ resolve2();
2565
+ });
2566
+ });
2567
+ },
2568
+ getQueueStatus() {
2569
+ return queue.getStatus();
2570
+ },
2571
+ address() {
2572
+ const addr = server.address();
2573
+ if (!addr || typeof addr === "string") return null;
2574
+ return { host: addr.address, port: addr.port };
2575
+ }
2576
+ };
2577
+ }
2578
+ export {
2579
+ JobQueue,
2580
+ appendRecord,
2581
+ buildRecordFromState,
2582
+ createWebhookServer,
2583
+ isAutoMergeEligible,
2584
+ loadConfig,
2585
+ parseIssueBody,
2586
+ parseIssueUrl,
2587
+ parsePRUrl,
2588
+ readRecords,
2589
+ resolveRepoConfig,
2590
+ runCodingAgent,
2591
+ runCommand,
2592
+ runPipeline,
2593
+ runPlanningAgent,
2594
+ runReviewerAgent,
2595
+ runSecurityScanner,
2596
+ runTests,
2597
+ summarize
2598
+ };
2599
+ //# sourceMappingURL=index.js.map