@renseiai/agentfactory 0.8.19 → 0.8.20
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/src/config/repository-config.d.ts +7 -0
- package/dist/src/config/repository-config.d.ts.map +1 -1
- package/dist/src/config/repository-config.js +15 -1
- package/dist/src/config/repository-config.test.js +1 -1
- package/dist/src/governor/decision-engine-adapter.js +5 -10
- package/dist/src/governor/decision-engine-adapter.test.js +13 -14
- package/dist/src/governor/decision-engine.js +3 -7
- package/dist/src/governor/decision-engine.test.js +5 -5
- package/dist/src/index.d.ts +1 -0
- package/dist/src/index.d.ts.map +1 -1
- package/dist/src/index.js +1 -0
- package/dist/src/merge-queue/adapters/local.d.ts +68 -0
- package/dist/src/merge-queue/adapters/local.d.ts.map +1 -0
- package/dist/src/merge-queue/adapters/local.js +136 -0
- package/dist/src/merge-queue/adapters/local.test.d.ts +2 -0
- package/dist/src/merge-queue/adapters/local.test.d.ts.map +1 -0
- package/dist/src/merge-queue/adapters/local.test.js +176 -0
- package/dist/src/merge-queue/index.d.ts +13 -5
- package/dist/src/merge-queue/index.d.ts.map +1 -1
- package/dist/src/merge-queue/index.js +13 -6
- package/dist/src/merge-queue/merge-queue.integration.test.js +19 -0
- package/dist/src/merge-queue/merge-worker.d.ts.map +1 -1
- package/dist/src/merge-queue/merge-worker.js +29 -0
- package/dist/src/merge-queue/types.d.ts +1 -1
- package/dist/src/merge-queue/types.d.ts.map +1 -1
- package/dist/src/orchestrator/index.d.ts +4 -0
- package/dist/src/orchestrator/index.d.ts.map +1 -1
- package/dist/src/orchestrator/index.js +3 -0
- package/dist/src/orchestrator/orchestrator.d.ts +31 -0
- package/dist/src/orchestrator/orchestrator.d.ts.map +1 -1
- package/dist/src/orchestrator/orchestrator.js +263 -11
- package/dist/src/orchestrator/parse-work-result.d.ts.map +1 -1
- package/dist/src/orchestrator/parse-work-result.js +3 -1
- package/dist/src/orchestrator/parse-work-result.test.js +6 -0
- package/dist/src/orchestrator/quality-baseline.d.ts +83 -0
- package/dist/src/orchestrator/quality-baseline.d.ts.map +1 -0
- package/dist/src/orchestrator/quality-baseline.js +313 -0
- package/dist/src/orchestrator/quality-baseline.test.d.ts +2 -0
- package/dist/src/orchestrator/quality-baseline.test.d.ts.map +1 -0
- package/dist/src/orchestrator/quality-baseline.test.js +448 -0
- package/dist/src/orchestrator/quality-ratchet.d.ts +70 -0
- package/dist/src/orchestrator/quality-ratchet.d.ts.map +1 -0
- package/dist/src/orchestrator/quality-ratchet.js +162 -0
- package/dist/src/orchestrator/quality-ratchet.test.d.ts +2 -0
- package/dist/src/orchestrator/quality-ratchet.test.d.ts.map +1 -0
- package/dist/src/orchestrator/quality-ratchet.test.js +335 -0
- package/dist/src/orchestrator/types.d.ts +2 -0
- package/dist/src/orchestrator/types.d.ts.map +1 -1
- package/dist/src/providers/codex-app-server-provider.d.ts +37 -1
- package/dist/src/providers/codex-app-server-provider.d.ts.map +1 -1
- package/dist/src/providers/codex-app-server-provider.js +290 -35
- package/dist/src/providers/codex-app-server-provider.test.js +72 -12
- package/dist/src/providers/codex-approval-bridge.d.ts +49 -0
- package/dist/src/providers/codex-approval-bridge.d.ts.map +1 -0
- package/dist/src/providers/codex-approval-bridge.js +117 -0
- package/dist/src/providers/codex-approval-bridge.test.d.ts +2 -0
- package/dist/src/providers/codex-approval-bridge.test.d.ts.map +1 -0
- package/dist/src/providers/codex-approval-bridge.test.js +188 -0
- package/dist/src/providers/types.d.ts +25 -0
- package/dist/src/providers/types.d.ts.map +1 -1
- package/dist/src/routing/types.d.ts +1 -1
- package/dist/src/templates/adapters.d.ts +25 -0
- package/dist/src/templates/adapters.d.ts.map +1 -1
- package/dist/src/templates/adapters.js +70 -0
- package/dist/src/templates/adapters.test.js +49 -0
- package/dist/src/templates/index.d.ts +1 -0
- package/dist/src/templates/index.d.ts.map +1 -1
- package/dist/src/templates/registry.d.ts +8 -0
- package/dist/src/templates/registry.d.ts.map +1 -1
- package/dist/src/templates/registry.js +11 -0
- package/dist/src/templates/types.d.ts +22 -0
- package/dist/src/templates/types.d.ts.map +1 -1
- package/dist/src/templates/types.js +12 -0
- package/dist/src/tools/index.d.ts +2 -0
- package/dist/src/tools/index.d.ts.map +1 -1
- package/dist/src/tools/index.js +1 -0
- package/dist/src/tools/registry.d.ts +9 -1
- package/dist/src/tools/registry.d.ts.map +1 -1
- package/dist/src/tools/registry.js +13 -1
- package/dist/src/tools/stdio-server-entry.d.ts +25 -0
- package/dist/src/tools/stdio-server-entry.d.ts.map +1 -0
- package/dist/src/tools/stdio-server-entry.js +205 -0
- package/dist/src/tools/stdio-server.d.ts +87 -0
- package/dist/src/tools/stdio-server.d.ts.map +1 -0
- package/dist/src/tools/stdio-server.js +138 -0
- package/dist/src/workflow/workflow-types.d.ts +3 -3
- package/package.json +3 -2
|
@@ -121,6 +121,7 @@ export declare const RepositoryConfigSchema: z.ZodObject<{
|
|
|
121
121
|
mergeQueue: z.ZodOptional<z.ZodObject<{
|
|
122
122
|
provider: z.ZodDefault<z.ZodEnum<{
|
|
123
123
|
"github-native": "github-native";
|
|
124
|
+
local: "local";
|
|
124
125
|
mergify: "mergify";
|
|
125
126
|
trunk: "trunk";
|
|
126
127
|
}>>;
|
|
@@ -159,6 +160,12 @@ export declare const RepositoryConfigSchema: z.ZodObject<{
|
|
|
159
160
|
default: "default";
|
|
160
161
|
mergiraf: "mergiraf";
|
|
161
162
|
}>>;
|
|
163
|
+
quality: z.ZodOptional<z.ZodObject<{
|
|
164
|
+
baselineEnabled: z.ZodDefault<z.ZodBoolean>;
|
|
165
|
+
ratchetEnabled: z.ZodDefault<z.ZodBoolean>;
|
|
166
|
+
boyscoutRule: z.ZodDefault<z.ZodBoolean>;
|
|
167
|
+
tddWorkflow: z.ZodDefault<z.ZodBoolean>;
|
|
168
|
+
}, z.core.$strip>>;
|
|
162
169
|
}, z.core.$strip>;
|
|
163
170
|
export type RepositoryConfig = z.infer<typeof RepositoryConfigSchema>;
|
|
164
171
|
/**
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"repository-config.d.ts","sourceRoot":"","sources":["../../../src/config/repository-config.ts"],"names":[],"mappings":"AAAA;;;;;;;GAOG;AAEH,OAAO,EAAE,CAAC,EAAE,MAAM,KAAK,CAAA;AAIvB,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,uBAAuB,CAAA;AAC5D,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,qBAAqB,CAAA;AAMxD,qEAAqE;AACrE,eAAO,MAAM,mBAAmB;;;;;;;;;;;;iBAW9B,CAAA;AAEF,MAAM,MAAM,aAAa,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,mBAAmB,CAAC,CAAA;AAW/D,uCAAuC;AACvC,eAAO,MAAM,qBAAqB;;;;;;;;;;;;;;;;;;;;;;iBAOhC,CAAA;AAEF,6DAA6D;AAC7D,eAAO,MAAM,0BAA0B;;;;;;;iBAarC,CAAA;AAEF,eAAO,MAAM,sBAAsB
|
|
1
|
+
{"version":3,"file":"repository-config.d.ts","sourceRoot":"","sources":["../../../src/config/repository-config.ts"],"names":[],"mappings":"AAAA;;;;;;;GAOG;AAEH,OAAO,EAAE,CAAC,EAAE,MAAM,KAAK,CAAA;AAIvB,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,uBAAuB,CAAA;AAC5D,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,qBAAqB,CAAA;AAMxD,qEAAqE;AACrE,eAAO,MAAM,mBAAmB;;;;;;;;;;;;iBAW9B,CAAA;AAEF,MAAM,MAAM,aAAa,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,mBAAmB,CAAC,CAAA;AAW/D,uCAAuC;AACvC,eAAO,MAAM,qBAAqB;;;;;;;;;;;;;;;;;;;;;;iBAOhC,CAAA;AAEF,6DAA6D;AAC7D,eAAO,MAAM,0BAA0B;;;;;;;iBAarC,CAAA;AAEF,eAAO,MAAM,sBAAsB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBA4HlC,CAAA;AAMD,MAAM,MAAM,gBAAgB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,sBAAsB,CAAC,CAAA;AAMrE;;;;GAIG;AACH,wBAAgB,2BAA2B,CAAC,MAAM,EAAE,gBAAgB,GAAG,MAAM,EAAE,GAAG,SAAS,CAK1F;AAED;;;;GAIG;AACH,wBAAgB,gBAAgB,CAAC,MAAM,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,aAAa,GAAG,IAAI,CAepG;AAED;;;GAGG;AACH,wBAAgB,cAAc,CAAC,MAAM,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,MAAM,GAAG,SAAS,CAKhG;AAED;;;GAGG;AACH,wBAAgB,kBAAkB,CAAC,MAAM,EAAE,gBAAgB,GAAG,eAAe,GAAG,SAAS,CAExF;AAED;;;GAGG;AACH,wBAAgB,gBAAgB,CAAC,MAAM,EAAE,gBAAgB,GAAG,aAAa,GAAG,SAAS,CAEpF;AAMD;;;;;;GAMG;AACH,wBAAgB,oBAAoB,CAAC,OAAO,EAAE,MAAM,GAAG,gBAAgB,GAAG,IAAI,CAQ7E"}
|
|
@@ -116,7 +116,7 @@ export const RepositoryConfigSchema = z.object({
|
|
|
116
116
|
*/
|
|
117
117
|
mergeQueue: z.object({
|
|
118
118
|
/** Merge queue provider to use */
|
|
119
|
-
provider: z.enum(['github-native', 'mergify', 'trunk']).default('
|
|
119
|
+
provider: z.enum(['github-native', 'local', 'mergify', 'trunk']).default('local'),
|
|
120
120
|
/** Whether merge queue integration is enabled */
|
|
121
121
|
enabled: z.boolean().default(false),
|
|
122
122
|
/** Automatically add approved PRs to merge queue */
|
|
@@ -164,6 +164,20 @@ export const RepositoryConfigSchema = z.object({
|
|
|
164
164
|
* Defaults to 'default' (standard git line-based merge).
|
|
165
165
|
*/
|
|
166
166
|
mergeDriver: z.enum(['mergiraf', 'default']).optional(),
|
|
167
|
+
/**
|
|
168
|
+
* Quality gate configuration.
|
|
169
|
+
* Controls baseline-diff quality checks and ratchet enforcement.
|
|
170
|
+
*/
|
|
171
|
+
quality: z.object({
|
|
172
|
+
/** Enable quality baseline capture at worktree creation and post-session delta check */
|
|
173
|
+
baselineEnabled: z.boolean().default(false),
|
|
174
|
+
/** Enable quality ratchet enforcement in merge queue and CI */
|
|
175
|
+
ratchetEnabled: z.boolean().default(false),
|
|
176
|
+
/** Include boy scout rule instructions in agent prompts */
|
|
177
|
+
boyscoutRule: z.boolean().default(true),
|
|
178
|
+
/** Include TDD workflow instructions in agent prompts */
|
|
179
|
+
tddWorkflow: z.boolean().default(true),
|
|
180
|
+
}).optional(),
|
|
167
181
|
}).refine((data) => !(data.allowedProjects && data.projectPaths), { message: 'allowedProjects and projectPaths are mutually exclusive — use one or the other' });
|
|
168
182
|
// ---------------------------------------------------------------------------
|
|
169
183
|
// Helpers
|
|
@@ -695,7 +695,7 @@ describe('RepositoryConfigSchema mergeQueue — Refinery fields', () => {
|
|
|
695
695
|
mergeQueue: { enabled: true },
|
|
696
696
|
});
|
|
697
697
|
expect(result.mergeQueue).toBeDefined();
|
|
698
|
-
expect(result.mergeQueue.provider).toBe('
|
|
698
|
+
expect(result.mergeQueue.provider).toBe('local');
|
|
699
699
|
expect(result.mergeQueue.enabled).toBe(true);
|
|
700
700
|
expect(result.mergeQueue.autoMerge).toBe(true);
|
|
701
701
|
expect(result.mergeQueue.requiredChecks).toBeUndefined();
|
|
@@ -335,16 +335,11 @@ function buildFinishedNode(config, includeMergeQueue) {
|
|
|
335
335
|
},
|
|
336
336
|
},
|
|
337
337
|
];
|
|
338
|
-
|
|
339
|
-
|
|
340
|
-
|
|
341
|
-
|
|
342
|
-
|
|
343
|
-
with: {
|
|
344
|
-
reason: 'Issue {{ issue.identifier }} is in Finished — enqueuing to merge queue',
|
|
345
|
-
},
|
|
346
|
-
});
|
|
347
|
-
}
|
|
338
|
+
// Always trigger QA for functional validation — merge queue handles git
|
|
339
|
+
// mechanics at merge time, not as a QA bypass.
|
|
340
|
+
// The includeMergeQueue flag is preserved for the adapter signature but the
|
|
341
|
+
// Finished node no longer short-circuits to trigger-merge.
|
|
342
|
+
void includeMergeQueue;
|
|
348
343
|
steps.push({
|
|
349
344
|
id: 'dispatch-qa',
|
|
350
345
|
action: 'trigger-qa',
|
|
@@ -149,21 +149,19 @@ describe('DecisionEngineAdapter', () => {
|
|
|
149
149
|
expect(actions).toContain('escalate-human');
|
|
150
150
|
expect(actions).toContain('decompose');
|
|
151
151
|
});
|
|
152
|
-
it('includes merge queue step in Finished
|
|
153
|
-
const
|
|
152
|
+
it('never includes merge queue step in Finished (merge queue does not bypass QA)', () => {
|
|
153
|
+
const workflowEnabled = DecisionEngineAdapter.toWorkflowDefinition({
|
|
154
154
|
includeMergeQueue: true,
|
|
155
155
|
});
|
|
156
|
-
const
|
|
157
|
-
const
|
|
158
|
-
expect(
|
|
159
|
-
|
|
160
|
-
it('excludes merge queue step in Finished when disabled', () => {
|
|
161
|
-
const workflow = DecisionEngineAdapter.toWorkflowDefinition({
|
|
156
|
+
const nodeEnabled = findNode(workflowEnabled, 'route-finished');
|
|
157
|
+
const actionsEnabled = nodeEnabled?.steps?.map(s => s.action);
|
|
158
|
+
expect(actionsEnabled).not.toContain('trigger-merge');
|
|
159
|
+
const workflowDisabled = DecisionEngineAdapter.toWorkflowDefinition({
|
|
162
160
|
includeMergeQueue: false,
|
|
163
161
|
});
|
|
164
|
-
const
|
|
165
|
-
const
|
|
166
|
-
expect(
|
|
162
|
+
const nodeDisabled = findNode(workflowDisabled, 'route-finished');
|
|
163
|
+
const actionsDisabled = nodeDisabled?.steps?.map(s => s.action);
|
|
164
|
+
expect(actionsDisabled).not.toContain('trigger-merge');
|
|
167
165
|
});
|
|
168
166
|
it('includes Delivered routing node', () => {
|
|
169
167
|
const workflow = DecisionEngineAdapter.toWorkflowDefinition();
|
|
@@ -294,16 +292,17 @@ describe('DecisionEngineAdapter', () => {
|
|
|
294
292
|
const decomposeStep = node?.steps?.find(s => s.action === 'decompose');
|
|
295
293
|
expect(decomposeStep).toBeDefined();
|
|
296
294
|
});
|
|
297
|
-
it('
|
|
295
|
+
it('Finished always triggers QA even with merge queue enabled (merge queue does not bypass QA)', () => {
|
|
298
296
|
const ctx = makeContext({
|
|
299
297
|
issue: makeIssue({ status: 'Finished' }),
|
|
300
298
|
mergeQueueEnabled: true,
|
|
301
299
|
});
|
|
302
300
|
const result = decideAction(ctx);
|
|
303
|
-
expect(result.action).toBe('trigger-
|
|
301
|
+
expect(result.action).toBe('trigger-qa');
|
|
304
302
|
const node = findNode(workflow, 'route-finished');
|
|
303
|
+
// No trigger-merge step should exist in the Finished node
|
|
305
304
|
const mergeStep = node?.steps?.find(s => s.action === 'trigger-merge');
|
|
306
|
-
expect(mergeStep).
|
|
305
|
+
expect(mergeStep).toBeUndefined();
|
|
307
306
|
});
|
|
308
307
|
it('covers Delivered → trigger-acceptance', () => {
|
|
309
308
|
const ctx = makeContext({ issue: makeIssue({ status: 'Delivered' }) });
|
|
@@ -244,13 +244,9 @@ function decideFinished(ctx) {
|
|
|
244
244
|
reason: `Issue ${issue.identifier} is in Finished with decompose strategy — triggering decomposition`,
|
|
245
245
|
};
|
|
246
246
|
}
|
|
247
|
-
//
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
action: 'trigger-merge',
|
|
251
|
-
reason: `Issue ${issue.identifier} is in Finished — enqueuing to merge queue`,
|
|
252
|
-
};
|
|
253
|
-
}
|
|
247
|
+
// Always run QA to validate functional correctness, even when merge queue is
|
|
248
|
+
// enabled. The merge queue handles git mechanics (rebase, conflict resolution)
|
|
249
|
+
// at merge time — it should not bypass implementation validation.
|
|
254
250
|
return {
|
|
255
251
|
action: 'trigger-qa',
|
|
256
252
|
reason: `Issue ${issue.identifier} is in Finished — triggering QA`,
|
|
@@ -408,15 +408,15 @@ describe('decideAction — Finished (QA)', () => {
|
|
|
408
408
|
// ---------------------------------------------------------------------------
|
|
409
409
|
// Finished (merge queue)
|
|
410
410
|
// ---------------------------------------------------------------------------
|
|
411
|
-
describe('decideAction — Finished (merge queue)', () => {
|
|
412
|
-
it('triggers
|
|
411
|
+
describe('decideAction — Finished (merge queue does not bypass QA)', () => {
|
|
412
|
+
it('triggers QA even when mergeQueueEnabled is true', () => {
|
|
413
413
|
const ctx = makeContext({
|
|
414
414
|
issue: makeIssue({ status: 'Finished' }),
|
|
415
415
|
mergeQueueEnabled: true,
|
|
416
416
|
});
|
|
417
417
|
const result = decideAction(ctx);
|
|
418
|
-
expect(result.action).toBe('trigger-
|
|
419
|
-
expect(result.reason).toContain('
|
|
418
|
+
expect(result.action).toBe('trigger-qa');
|
|
419
|
+
expect(result.reason).toContain('triggering QA');
|
|
420
420
|
});
|
|
421
421
|
it('triggers QA when mergeQueueEnabled is false', () => {
|
|
422
422
|
const ctx = makeContext({
|
|
@@ -453,7 +453,7 @@ describe('decideAction — Finished (merge queue)', () => {
|
|
|
453
453
|
const result = decideAction(ctx);
|
|
454
454
|
expect(result.action).toBe('decompose');
|
|
455
455
|
});
|
|
456
|
-
it('auto-QA disabled takes precedence
|
|
456
|
+
it('auto-QA disabled takes precedence even with merge queue', () => {
|
|
457
457
|
const ctx = makeContext({
|
|
458
458
|
issue: makeIssue({ status: 'Finished' }),
|
|
459
459
|
mergeQueueEnabled: true,
|
package/dist/src/index.d.ts
CHANGED
package/dist/src/index.d.ts.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAAA,cAAc,yBAAyB,CAAA;AACvC,cAAc,sBAAsB,CAAA;AACpC,cAAc,aAAa,CAAA;AAC3B,cAAc,uBAAuB,CAAA;AACrC,cAAc,sBAAsB,CAAA;AACpC,cAAc,qBAAqB,CAAA;AACnC,cAAc,qBAAqB,CAAA;AACnC,cAAc,mBAAmB,CAAA;AACjC,cAAc,qBAAqB,CAAA;AACnC,cAAc,kBAAkB,CAAA;AAChC,cAAc,qBAAqB,CAAA;AACnC,cAAc,oBAAoB,CAAA;AAClC,cAAc,qBAAqB,CAAA"}
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAAA,cAAc,yBAAyB,CAAA;AACvC,cAAc,sBAAsB,CAAA;AACpC,cAAc,aAAa,CAAA;AAC3B,cAAc,uBAAuB,CAAA;AACrC,cAAc,sBAAsB,CAAA;AACpC,cAAc,qBAAqB,CAAA;AACnC,cAAc,qBAAqB,CAAA;AACnC,cAAc,mBAAmB,CAAA;AACjC,cAAc,qBAAqB,CAAA;AACnC,cAAc,kBAAkB,CAAA;AAChC,cAAc,qBAAqB,CAAA;AACnC,cAAc,oBAAoB,CAAA;AAClC,cAAc,qBAAqB,CAAA;AACnC,cAAc,wBAAwB,CAAA"}
|
package/dist/src/index.js
CHANGED
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Local Merge Queue Adapter
|
|
3
|
+
*
|
|
4
|
+
* Self-hosted merge queue that uses the built-in merge worker + Redis storage
|
|
5
|
+
* instead of an external service like GitHub's merge queue. This is the default
|
|
6
|
+
* provider — it works with any GitHub repository without requiring GitHub's
|
|
7
|
+
* paid merge queue feature.
|
|
8
|
+
*
|
|
9
|
+
* The adapter handles queue management (enqueue/dequeue/status). The merge worker
|
|
10
|
+
* (merge-worker.ts) handles the actual rebase → resolve → test → merge pipeline.
|
|
11
|
+
*
|
|
12
|
+
* PR eligibility is checked via `gh pr view` CLI (no GraphQL needed).
|
|
13
|
+
*/
|
|
14
|
+
import type { MergeQueueAdapter, MergeQueueStatus } from '../types.js';
|
|
15
|
+
export interface LocalMergeQueueStorage {
|
|
16
|
+
enqueue(entry: {
|
|
17
|
+
repoId: string;
|
|
18
|
+
prNumber: number;
|
|
19
|
+
prUrl: string;
|
|
20
|
+
issueIdentifier: string;
|
|
21
|
+
priority: number;
|
|
22
|
+
sourceBranch: string;
|
|
23
|
+
targetBranch: string;
|
|
24
|
+
}): Promise<void>;
|
|
25
|
+
dequeue(repoId: string): Promise<{
|
|
26
|
+
prNumber: number;
|
|
27
|
+
} | null>;
|
|
28
|
+
/** Get queue depth for a repo */
|
|
29
|
+
getQueueDepth(repoId: string): Promise<number>;
|
|
30
|
+
/** Check if a PR is already in the queue */
|
|
31
|
+
isEnqueued(repoId: string, prNumber: number): Promise<boolean>;
|
|
32
|
+
/** Get position of a PR in the queue (1-based), or null if not queued */
|
|
33
|
+
getPosition(repoId: string, prNumber: number): Promise<number | null>;
|
|
34
|
+
/** Remove a specific PR from the queue */
|
|
35
|
+
remove(repoId: string, prNumber: number): Promise<void>;
|
|
36
|
+
/** Get failed/blocked status for a PR */
|
|
37
|
+
getFailedReason(repoId: string, prNumber: number): Promise<string | null>;
|
|
38
|
+
getBlockedReason(repoId: string, prNumber: number): Promise<string | null>;
|
|
39
|
+
}
|
|
40
|
+
export declare class LocalMergeQueueAdapter implements MergeQueueAdapter {
|
|
41
|
+
private storage;
|
|
42
|
+
readonly name: "local";
|
|
43
|
+
constructor(storage: LocalMergeQueueStorage);
|
|
44
|
+
/**
|
|
45
|
+
* Check if a PR is eligible for the local merge queue.
|
|
46
|
+
* Uses `gh pr view` to verify PR is open. Does NOT require the PR to be
|
|
47
|
+
* conflict-free — the merge worker handles rebasing.
|
|
48
|
+
*/
|
|
49
|
+
canEnqueue(owner: string, repo: string, prNumber: number): Promise<boolean>;
|
|
50
|
+
/**
|
|
51
|
+
* Add a PR to the local merge queue.
|
|
52
|
+
* The merge worker will pick it up and process it (rebase, test, merge).
|
|
53
|
+
*/
|
|
54
|
+
enqueue(owner: string, repo: string, prNumber: number): Promise<MergeQueueStatus>;
|
|
55
|
+
/**
|
|
56
|
+
* Get the status of a PR in the local merge queue.
|
|
57
|
+
*/
|
|
58
|
+
getStatus(owner: string, repo: string, prNumber: number): Promise<MergeQueueStatus>;
|
|
59
|
+
/**
|
|
60
|
+
* Remove a PR from the local merge queue.
|
|
61
|
+
*/
|
|
62
|
+
dequeue(owner: string, repo: string, prNumber: number): Promise<void>;
|
|
63
|
+
/**
|
|
64
|
+
* Local merge queue is always available (no external service dependency).
|
|
65
|
+
*/
|
|
66
|
+
isEnabled(_owner: string, _repo: string): Promise<boolean>;
|
|
67
|
+
}
|
|
68
|
+
//# sourceMappingURL=local.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"local.d.ts","sourceRoot":"","sources":["../../../../src/merge-queue/adapters/local.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;GAYG;AAIH,OAAO,KAAK,EAAE,iBAAiB,EAAE,gBAAgB,EAAE,MAAM,aAAa,CAAA;AAWtE,MAAM,WAAW,sBAAsB;IACrC,OAAO,CAAC,KAAK,EAAE;QACb,MAAM,EAAE,MAAM,CAAA;QACd,QAAQ,EAAE,MAAM,CAAA;QAChB,KAAK,EAAE,MAAM,CAAA;QACb,eAAe,EAAE,MAAM,CAAA;QACvB,QAAQ,EAAE,MAAM,CAAA;QAChB,YAAY,EAAE,MAAM,CAAA;QACpB,YAAY,EAAE,MAAM,CAAA;KACrB,GAAG,OAAO,CAAC,IAAI,CAAC,CAAA;IAEjB,OAAO,CAAC,MAAM,EAAE,MAAM,GAAG,OAAO,CAAC;QAAE,QAAQ,EAAE,MAAM,CAAA;KAAE,GAAG,IAAI,CAAC,CAAA;IAE7D,iCAAiC;IACjC,aAAa,CAAC,MAAM,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC,CAAA;IAE9C,4CAA4C;IAC5C,UAAU,CAAC,MAAM,EAAE,MAAM,EAAE,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC,OAAO,CAAC,CAAA;IAE9D,yEAAyE;IACzE,WAAW,CAAC,MAAM,EAAE,MAAM,EAAE,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC,CAAA;IAErE,0CAA0C;IAC1C,MAAM,CAAC,MAAM,EAAE,MAAM,EAAE,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC,CAAA;IAEvD,yCAAyC;IACzC,eAAe,CAAC,MAAM,EAAE,MAAM,EAAE,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC,CAAA;IACzE,gBAAgB,CAAC,MAAM,EAAE,MAAM,EAAE,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC,CAAA;CAC3E;AAMD,qBAAa,sBAAuB,YAAW,iBAAiB;IAGlD,OAAO,CAAC,OAAO;IAF3B,QAAQ,CAAC,IAAI,EAAG,OAAO,CAAS;gBAEZ,OAAO,EAAE,sBAAsB;IAEnD;;;;OAIG;IACG,UAAU,CAAC,KAAK,EAAE,MAAM,EAAE,IAAI,EAAE,MAAM,EAAE,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC,OAAO,CAAC;IAcjF;;;OAGG;IACG,OAAO,CAAC,KAAK,EAAE,MAAM,EAAE,IAAI,EAAE,MAAM,EAAE,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC,gBAAgB,CAAC;IAqCvF;;OAEG;IACG,SAAS,CAAC,KAAK,EAAE,MAAM,EAAE,IAAI,EAAE,MAAM,EAAE,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC,gBAAgB,CAAC;IAkDzF;;OAEG;IACG,OAAO,CAAC,KAAK,EAAE,MAAM,EAAE,IAAI,EAAE,MAAM,EAAE,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IAK3E;;OAEG;IACG,SAAS,CAAC,MAAM,EAAE,MAAM,EAAE,KAAK,EAAE,MAAM,GAAG,OAAO,CAAC,OAAO,CAAC;CAGjE"}
|
|
@@ -0,0 +1,136 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Local Merge Queue Adapter
|
|
3
|
+
*
|
|
4
|
+
* Self-hosted merge queue that uses the built-in merge worker + Redis storage
|
|
5
|
+
* instead of an external service like GitHub's merge queue. This is the default
|
|
6
|
+
* provider — it works with any GitHub repository without requiring GitHub's
|
|
7
|
+
* paid merge queue feature.
|
|
8
|
+
*
|
|
9
|
+
* The adapter handles queue management (enqueue/dequeue/status). The merge worker
|
|
10
|
+
* (merge-worker.ts) handles the actual rebase → resolve → test → merge pipeline.
|
|
11
|
+
*
|
|
12
|
+
* PR eligibility is checked via `gh pr view` CLI (no GraphQL needed).
|
|
13
|
+
*/
|
|
14
|
+
import { exec } from 'child_process';
|
|
15
|
+
import { promisify } from 'util';
|
|
16
|
+
const execAsync = promisify(exec);
|
|
17
|
+
/** Timeout for gh CLI calls */
|
|
18
|
+
const GH_CLI_TIMEOUT = 15_000;
|
|
19
|
+
// ---------------------------------------------------------------------------
|
|
20
|
+
// Adapter
|
|
21
|
+
// ---------------------------------------------------------------------------
|
|
22
|
+
export class LocalMergeQueueAdapter {
|
|
23
|
+
storage;
|
|
24
|
+
name = 'local';
|
|
25
|
+
constructor(storage) {
|
|
26
|
+
this.storage = storage;
|
|
27
|
+
}
|
|
28
|
+
/**
|
|
29
|
+
* Check if a PR is eligible for the local merge queue.
|
|
30
|
+
* Uses `gh pr view` to verify PR is open. Does NOT require the PR to be
|
|
31
|
+
* conflict-free — the merge worker handles rebasing.
|
|
32
|
+
*/
|
|
33
|
+
async canEnqueue(owner, repo, prNumber) {
|
|
34
|
+
try {
|
|
35
|
+
const { stdout } = await execAsync(`gh pr view ${prNumber} --repo ${owner}/${repo} --json state,headRefName`, { timeout: GH_CLI_TIMEOUT });
|
|
36
|
+
const pr = JSON.parse(stdout);
|
|
37
|
+
// PR must be open
|
|
38
|
+
return pr.state === 'OPEN';
|
|
39
|
+
}
|
|
40
|
+
catch {
|
|
41
|
+
return false;
|
|
42
|
+
}
|
|
43
|
+
}
|
|
44
|
+
/**
|
|
45
|
+
* Add a PR to the local merge queue.
|
|
46
|
+
* The merge worker will pick it up and process it (rebase, test, merge).
|
|
47
|
+
*/
|
|
48
|
+
async enqueue(owner, repo, prNumber) {
|
|
49
|
+
const repoId = `${owner}/${repo}`;
|
|
50
|
+
// Check if already enqueued
|
|
51
|
+
const alreadyQueued = await this.storage.isEnqueued(repoId, prNumber);
|
|
52
|
+
if (alreadyQueued) {
|
|
53
|
+
return this.getStatus(owner, repo, prNumber);
|
|
54
|
+
}
|
|
55
|
+
// Fetch PR details for the queue entry
|
|
56
|
+
let sourceBranch = `pr-${prNumber}`;
|
|
57
|
+
let prUrl = `https://github.com/${owner}/${repo}/pull/${prNumber}`;
|
|
58
|
+
try {
|
|
59
|
+
const { stdout } = await execAsync(`gh pr view ${prNumber} --repo ${owner}/${repo} --json headRefName,url`, { timeout: GH_CLI_TIMEOUT });
|
|
60
|
+
const pr = JSON.parse(stdout);
|
|
61
|
+
sourceBranch = pr.headRefName ?? sourceBranch;
|
|
62
|
+
prUrl = pr.url ?? prUrl;
|
|
63
|
+
}
|
|
64
|
+
catch {
|
|
65
|
+
// Fall back to defaults
|
|
66
|
+
}
|
|
67
|
+
await this.storage.enqueue({
|
|
68
|
+
repoId,
|
|
69
|
+
prNumber,
|
|
70
|
+
prUrl,
|
|
71
|
+
issueIdentifier: `PR-${prNumber}`,
|
|
72
|
+
priority: 3, // Default priority; orchestrator can override
|
|
73
|
+
sourceBranch,
|
|
74
|
+
targetBranch: 'main',
|
|
75
|
+
});
|
|
76
|
+
return this.getStatus(owner, repo, prNumber);
|
|
77
|
+
}
|
|
78
|
+
/**
|
|
79
|
+
* Get the status of a PR in the local merge queue.
|
|
80
|
+
*/
|
|
81
|
+
async getStatus(owner, repo, prNumber) {
|
|
82
|
+
const repoId = `${owner}/${repo}`;
|
|
83
|
+
// Check if in queue
|
|
84
|
+
const position = await this.storage.getPosition(repoId, prNumber);
|
|
85
|
+
if (position !== null) {
|
|
86
|
+
return {
|
|
87
|
+
state: position === 1 ? 'merging' : 'queued',
|
|
88
|
+
position,
|
|
89
|
+
checksStatus: [],
|
|
90
|
+
};
|
|
91
|
+
}
|
|
92
|
+
// Check if failed
|
|
93
|
+
const failedReason = await this.storage.getFailedReason(repoId, prNumber);
|
|
94
|
+
if (failedReason) {
|
|
95
|
+
return {
|
|
96
|
+
state: 'failed',
|
|
97
|
+
failureReason: failedReason,
|
|
98
|
+
checksStatus: [],
|
|
99
|
+
};
|
|
100
|
+
}
|
|
101
|
+
// Check if blocked (conflict)
|
|
102
|
+
const blockedReason = await this.storage.getBlockedReason(repoId, prNumber);
|
|
103
|
+
if (blockedReason) {
|
|
104
|
+
return {
|
|
105
|
+
state: 'blocked',
|
|
106
|
+
failureReason: blockedReason,
|
|
107
|
+
checksStatus: [],
|
|
108
|
+
};
|
|
109
|
+
}
|
|
110
|
+
// Check if PR was already merged
|
|
111
|
+
try {
|
|
112
|
+
const { stdout } = await execAsync(`gh pr view ${prNumber} --repo ${owner}/${repo} --json state`, { timeout: GH_CLI_TIMEOUT });
|
|
113
|
+
const pr = JSON.parse(stdout);
|
|
114
|
+
if (pr.state === 'MERGED') {
|
|
115
|
+
return { state: 'merged', checksStatus: [] };
|
|
116
|
+
}
|
|
117
|
+
}
|
|
118
|
+
catch {
|
|
119
|
+
// Fall through to not-queued
|
|
120
|
+
}
|
|
121
|
+
return { state: 'not-queued', checksStatus: [] };
|
|
122
|
+
}
|
|
123
|
+
/**
|
|
124
|
+
* Remove a PR from the local merge queue.
|
|
125
|
+
*/
|
|
126
|
+
async dequeue(owner, repo, prNumber) {
|
|
127
|
+
const repoId = `${owner}/${repo}`;
|
|
128
|
+
await this.storage.remove(repoId, prNumber);
|
|
129
|
+
}
|
|
130
|
+
/**
|
|
131
|
+
* Local merge queue is always available (no external service dependency).
|
|
132
|
+
*/
|
|
133
|
+
async isEnabled(_owner, _repo) {
|
|
134
|
+
return true;
|
|
135
|
+
}
|
|
136
|
+
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"local.test.d.ts","sourceRoot":"","sources":["../../../../src/merge-queue/adapters/local.test.ts"],"names":[],"mappings":""}
|
|
@@ -0,0 +1,176 @@
|
|
|
1
|
+
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
|
2
|
+
import { LocalMergeQueueAdapter } from './local.js';
|
|
3
|
+
// Mock child_process.exec
|
|
4
|
+
vi.mock('child_process', () => ({
|
|
5
|
+
exec: vi.fn(),
|
|
6
|
+
}));
|
|
7
|
+
import { exec } from 'child_process';
|
|
8
|
+
function mockExec(stdout) {
|
|
9
|
+
;
|
|
10
|
+
exec.mockImplementation((_cmd, _opts, cb) => {
|
|
11
|
+
// promisify wraps exec — the 2-arg form is (cmd, opts) returning Promise
|
|
12
|
+
// but we also need to handle the 3-arg callback form
|
|
13
|
+
if (typeof _opts === 'function') {
|
|
14
|
+
cb = _opts;
|
|
15
|
+
_opts = undefined;
|
|
16
|
+
}
|
|
17
|
+
if (cb) {
|
|
18
|
+
cb(null, { stdout, stderr: '' });
|
|
19
|
+
return;
|
|
20
|
+
}
|
|
21
|
+
return { stdout, stderr: '' };
|
|
22
|
+
});
|
|
23
|
+
}
|
|
24
|
+
function mockExecReject(error) {
|
|
25
|
+
;
|
|
26
|
+
exec.mockImplementation((_cmd, _opts, cb) => {
|
|
27
|
+
if (typeof _opts === 'function') {
|
|
28
|
+
cb = _opts;
|
|
29
|
+
}
|
|
30
|
+
if (cb) {
|
|
31
|
+
cb(error, null);
|
|
32
|
+
return;
|
|
33
|
+
}
|
|
34
|
+
throw error;
|
|
35
|
+
});
|
|
36
|
+
}
|
|
37
|
+
function createMockStorage(overrides = {}) {
|
|
38
|
+
return {
|
|
39
|
+
enqueue: vi.fn().mockResolvedValue(undefined),
|
|
40
|
+
dequeue: vi.fn().mockResolvedValue(null),
|
|
41
|
+
getQueueDepth: vi.fn().mockResolvedValue(0),
|
|
42
|
+
isEnqueued: vi.fn().mockResolvedValue(false),
|
|
43
|
+
getPosition: vi.fn().mockResolvedValue(null),
|
|
44
|
+
remove: vi.fn().mockResolvedValue(undefined),
|
|
45
|
+
getFailedReason: vi.fn().mockResolvedValue(null),
|
|
46
|
+
getBlockedReason: vi.fn().mockResolvedValue(null),
|
|
47
|
+
...overrides,
|
|
48
|
+
};
|
|
49
|
+
}
|
|
50
|
+
describe('LocalMergeQueueAdapter', () => {
|
|
51
|
+
let storage;
|
|
52
|
+
let adapter;
|
|
53
|
+
beforeEach(() => {
|
|
54
|
+
vi.clearAllMocks();
|
|
55
|
+
storage = createMockStorage();
|
|
56
|
+
adapter = new LocalMergeQueueAdapter(storage);
|
|
57
|
+
});
|
|
58
|
+
it('has name "local"', () => {
|
|
59
|
+
expect(adapter.name).toBe('local');
|
|
60
|
+
});
|
|
61
|
+
describe('isEnabled', () => {
|
|
62
|
+
it('always returns true', async () => {
|
|
63
|
+
expect(await adapter.isEnabled('owner', 'repo')).toBe(true);
|
|
64
|
+
});
|
|
65
|
+
});
|
|
66
|
+
describe('canEnqueue', () => {
|
|
67
|
+
it('returns true for open PR', async () => {
|
|
68
|
+
mockExec(JSON.stringify({ state: 'OPEN', headRefName: 'feat/test' }));
|
|
69
|
+
expect(await adapter.canEnqueue('owner', 'repo', 42)).toBe(true);
|
|
70
|
+
});
|
|
71
|
+
it('returns false for merged PR', async () => {
|
|
72
|
+
mockExec(JSON.stringify({ state: 'MERGED', headRefName: 'feat/test' }));
|
|
73
|
+
expect(await adapter.canEnqueue('owner', 'repo', 42)).toBe(false);
|
|
74
|
+
});
|
|
75
|
+
it('returns false for closed PR', async () => {
|
|
76
|
+
mockExec(JSON.stringify({ state: 'CLOSED', headRefName: 'feat/test' }));
|
|
77
|
+
expect(await adapter.canEnqueue('owner', 'repo', 42)).toBe(false);
|
|
78
|
+
});
|
|
79
|
+
it('returns false on gh CLI error', async () => {
|
|
80
|
+
mockExecReject(new Error('gh: not found'));
|
|
81
|
+
expect(await adapter.canEnqueue('owner', 'repo', 42)).toBe(false);
|
|
82
|
+
});
|
|
83
|
+
});
|
|
84
|
+
describe('enqueue', () => {
    // Covers the three enqueue paths: normal insert, idempotent re-enqueue,
    // and branch-name fallback when the gh CLI lookup fails.
    // NOTE(review): removed a stray lone `;` empty statement that appeared to be
    // residue of a deleted mockExec setup call — the gh result is unused on the
    // already-enqueued path, so no replacement setup is needed.
    it('adds PR to storage and returns queued status', async () => {
        mockExec(JSON.stringify({ headRefName: 'feat/my-branch', url: 'https://github.com/o/r/pull/10' }));
        storage.isEnqueued.mockResolvedValue(false);
        storage.getPosition.mockResolvedValue(1);
        const status = await adapter.enqueue('owner', 'repo', 10);
        expect(storage.enqueue).toHaveBeenCalledWith(expect.objectContaining({
            repoId: 'owner/repo',
            prNumber: 10,
            sourceBranch: 'feat/my-branch',
            targetBranch: 'main',
            priority: 3,
        }));
        expect(status.state).toBe('merging'); // position 1 = merging
    });
    it('returns existing status if already enqueued', async () => {
        storage.isEnqueued.mockResolvedValue(true);
        storage.getPosition.mockResolvedValue(3);
        const status = await adapter.enqueue('owner', 'repo', 10);
        // Idempotent: no second insert, current queue status returned as-is.
        expect(storage.enqueue).not.toHaveBeenCalled();
        expect(status.state).toBe('queued');
        expect(status.position).toBe(3);
    });
    it('falls back to default branch name on gh CLI error', async () => {
        mockExecReject(new Error('gh: not found'));
        storage.isEnqueued.mockResolvedValue(false);
        storage.getPosition.mockResolvedValue(2);
        await adapter.enqueue('owner', 'repo', 5);
        // Branch/URL are synthesized from owner/repo/PR number when gh fails.
        expect(storage.enqueue).toHaveBeenCalledWith(expect.objectContaining({
            sourceBranch: 'pr-5',
            prUrl: 'https://github.com/owner/repo/pull/5',
        }));
    });
});
|
|
119
|
+
describe('getStatus', () => {
    // Status resolution order exercised here: queue position first
    // (1 => merging, >1 => queued), then failed reason, then blocked reason,
    // then a live gh lookup (MERGED => merged, otherwise not-queued).
    // NOTE(review): removed stray lone `;` empty statements — residue of
    // deleted setup calls. The tests that need a gh result still mockExec;
    // the position-only tests never reach the gh lookup.
    it('returns queued with position when in queue', async () => {
        storage.getPosition.mockResolvedValue(3);
        const status = await adapter.getStatus('owner', 'repo', 10);
        expect(status.state).toBe('queued');
        expect(status.position).toBe(3);
    });
    it('returns merging when position is 1', async () => {
        storage.getPosition.mockResolvedValue(1);
        const status = await adapter.getStatus('owner', 'repo', 10);
        expect(status.state).toBe('merging');
        expect(status.position).toBe(1);
    });
    it('returns failed when PR has failed reason', async () => {
        storage.getPosition.mockResolvedValue(null);
        storage.getFailedReason.mockResolvedValue('Tests failed');
        const status = await adapter.getStatus('owner', 'repo', 10);
        expect(status.state).toBe('failed');
        expect(status.failureReason).toBe('Tests failed');
    });
    it('returns blocked when PR has blocked reason', async () => {
        storage.getPosition.mockResolvedValue(null);
        storage.getFailedReason.mockResolvedValue(null);
        storage.getBlockedReason.mockResolvedValue('Merge conflict in utils.ts');
        const status = await adapter.getStatus('owner', 'repo', 10);
        expect(status.state).toBe('blocked');
        expect(status.failureReason).toBe('Merge conflict in utils.ts');
    });
    it('returns merged when PR is already merged', async () => {
        storage.getPosition.mockResolvedValue(null);
        storage.getFailedReason.mockResolvedValue(null);
        storage.getBlockedReason.mockResolvedValue(null);
        mockExec(JSON.stringify({ state: 'MERGED' }));
        const status = await adapter.getStatus('owner', 'repo', 10);
        expect(status.state).toBe('merged');
    });
    it('returns not-queued when PR is not in any state', async () => {
        storage.getPosition.mockResolvedValue(null);
        storage.getFailedReason.mockResolvedValue(null);
        storage.getBlockedReason.mockResolvedValue(null);
        mockExec(JSON.stringify({ state: 'OPEN' }));
        const status = await adapter.getStatus('owner', 'repo', 10);
        expect(status.state).toBe('not-queued');
    });
});
|
|
170
|
+
describe('dequeue', () => {
    // Dequeue is a thin delegation: the adapter forwards to storage.remove
    // with the combined "owner/repo" repo id and the PR number.
    it('removes PR from storage', async () => {
        await adapter.dequeue('owner', 'repo', 10);
        expect(storage.remove).toHaveBeenCalledWith('owner/repo', 10);
    });
});
|
|
176
|
+
});
|
|
@@ -3,16 +3,24 @@
|
|
|
3
3
|
*
|
|
4
4
|
* Factory and exports for merge queue adapters.
|
|
5
5
|
*/
|
|
6
|
+
import type { LocalMergeQueueStorage } from './adapters/local.js';
|
|
6
7
|
export type { MergeQueueAdapter, MergeQueueStatus, MergeQueueProviderName } from './types.js';
|
|
8
|
+
export type { LocalMergeQueueStorage } from './adapters/local.js';
|
|
9
|
+
export { LocalMergeQueueAdapter } from './adapters/local.js';
|
|
10
|
+
export { MergeWorker } from './merge-worker.js';
|
|
11
|
+
export type { MergeWorkerConfig, MergeWorkerDeps, MergeProcessResult } from './merge-worker.js';
|
|
12
|
+
/** Optional dependencies for adapter construction */
|
|
13
|
+
export interface MergeQueueAdapterDeps {
|
|
14
|
+
/** Required for 'local' provider — Redis-backed queue storage */
|
|
15
|
+
storage?: LocalMergeQueueStorage;
|
|
16
|
+
}
|
|
7
17
|
/**
|
|
8
18
|
* Create a merge queue adapter by provider name.
|
|
9
19
|
*
|
|
10
|
-
* Currently only 'github-native' is implemented.
|
|
11
|
-
* 'mergify' and 'trunk' will throw until implemented.
|
|
12
|
-
*
|
|
13
20
|
* @param name - Provider name
|
|
21
|
+
* @param deps - Optional dependencies (storage required for 'local' provider)
|
|
14
22
|
* @returns MergeQueueAdapter instance
|
|
15
|
-
* @throws Error if provider is not yet implemented
|
|
23
|
+
* @throws Error if provider is not yet implemented or required deps are missing
|
|
16
24
|
*/
|
|
17
|
-
export declare function createMergeQueueAdapter(name: import('./types.js').MergeQueueProviderName): import('./types.js').MergeQueueAdapter;
|
|
25
|
+
export declare function createMergeQueueAdapter(name: import('./types.js').MergeQueueProviderName, deps?: MergeQueueAdapterDeps): import('./types.js').MergeQueueAdapter;
|
|
18
26
|
//# sourceMappingURL=index.d.ts.map
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/merge-queue/index.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAIH,YAAY,EAAE,iBAAiB,EAAE,gBAAgB,EAAE,sBAAsB,EAAE,MAAM,YAAY,CAAA;
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/merge-queue/index.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAIH,OAAO,KAAK,EAAE,sBAAsB,EAAE,MAAM,qBAAqB,CAAA;AAEjE,YAAY,EAAE,iBAAiB,EAAE,gBAAgB,EAAE,sBAAsB,EAAE,MAAM,YAAY,CAAA;AAC7F,YAAY,EAAE,sBAAsB,EAAE,MAAM,qBAAqB,CAAA;AACjE,OAAO,EAAE,sBAAsB,EAAE,MAAM,qBAAqB,CAAA;AAC5D,OAAO,EAAE,WAAW,EAAE,MAAM,mBAAmB,CAAA;AAC/C,YAAY,EAAE,iBAAiB,EAAE,eAAe,EAAE,kBAAkB,EAAE,MAAM,mBAAmB,CAAA;AAE/F,qDAAqD;AACrD,MAAM,WAAW,qBAAqB;IACpC,iEAAiE;IACjE,OAAO,CAAC,EAAE,sBAAsB,CAAA;CACjC;AAED;;;;;;;GAOG;AACH,wBAAgB,uBAAuB,CACrC,IAAI,EAAE,OAAO,YAAY,EAAE,sBAAsB,EACjD,IAAI,CAAC,EAAE,qBAAqB,GAC3B,OAAO,YAAY,EAAE,iBAAiB,CAyBxC"}
|