moflo 4.9.26 → 4.9.28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,335 @@
1
+ /**
2
+ * Daemon HTTP RPC for memory writes (#981 — single-writer architecture).
3
+ *
4
+ * Adds POST /api/memory/{store,delete,batch} to the existing daemon HTTP
5
+ * server. The daemon process becomes the single authoritative writer when
6
+ * these endpoints are called; other processes (CLI, MCP server) route
7
+ * writes here via the daemon-write-client (Story #984).
8
+ *
9
+ * Story #983 ships these endpoints purely additively — nothing in the
10
+ * codebase calls them yet. Stories #985 / #986 wire consumer callers.
11
+ *
12
+ * Loopback-only: the parent server binds 127.0.0.1, so no auth/CSRF.
13
+ *
14
+ * @module daemon-memory-rpc
15
+ */
16
+ import { errorDetail } from '../shared/utils/error-detail.js';
17
+ // ============================================================================
18
+ // Constants
19
+ // ============================================================================
20
/**
 * Hard cap on POST body size for the memory RPC endpoints; larger bodies
 * are rejected (→ 413 upstream, 400 from the body reader). One mebibyte
 * comfortably exceeds any single entry while bounding pathological
 * loopback writes (defense-in-depth even on 127.0.0.1).
 */
export const MEMORY_RPC_MAX_BODY_BYTES = 1024 * 1024;
/** Namespaces may contain alphanumerics plus `.`, `-`, `_`; length 1–64. */
const NAMESPACE_PATTERN = /^[a-zA-Z0-9._-]{1,64}$/;
/** Upper bound on key length, matching the memory_entries TEXT column conventions. */
const KEY_MAX_LENGTH = 256;
/** Upper bound on operations per batch request; bounds memory and write time. */
export const BATCH_MAX_OPS = 100;
32
+ // ============================================================================
33
+ // JSON body reader (size-capped, never throws)
34
+ // ============================================================================
35
/**
 * Read and parse a JSON request body while enforcing a hard byte cap.
 *
 * Resolves to null (never rejects) when:
 * - the accumulated body exceeds {@link MEMORY_RPC_MAX_BODY_BYTES},
 * - the body is empty or is not valid JSON, or
 * - the request stream errors before completing.
 *
 * Callers translate a null result into an HTTP 400.
 */
async function readJsonBody(req) {
    return new Promise((resolve) => {
        const buffers = [];
        let received = 0;
        let settled = false;
        const settle = (value) => {
            if (settled) {
                return;
            }
            settled = true;
            resolve(value);
        };
        req.on('data', (chunk) => {
            received += chunk.length;
            if (received > MEMORY_RPC_MAX_BODY_BYTES) {
                // Oversized: release what we buffered and answer null.
                // The rest of the stream is still consumed, just ignored.
                buffers.length = 0;
                settle(null);
                return;
            }
            if (!settled) {
                buffers.push(chunk);
            }
        });
        req.on('end', () => {
            if (settled) {
                return;
            }
            const raw = Buffer.concat(buffers).toString('utf8');
            if (raw.length === 0) {
                settle(null);
                return;
            }
            try {
                settle(JSON.parse(raw));
            }
            catch {
                settle(null);
            }
        });
        req.on('error', () => settle(null));
    });
}
79
+ // ============================================================================
80
+ // Validation
81
+ // ============================================================================
82
/** True when `ns` is a string matching the conservative namespace pattern. */
function isNamespace(ns) {
    if (typeof ns !== 'string') {
        return false;
    }
    return NAMESPACE_PATTERN.test(ns);
}
85
/** True when `key` is a non-empty string no longer than KEY_MAX_LENGTH. */
function isKey(key) {
    if (typeof key !== 'string') {
        return false;
    }
    return key.length >= 1 && key.length <= KEY_MAX_LENGTH;
}
88
/** True when `value` is an array whose every element is a string (empty array passes). */
function isStringArray(value) {
    if (!Array.isArray(value)) {
        return false;
    }
    for (const item of value) {
        if (typeof item !== 'string') {
            return false;
        }
    }
    return true;
}
91
/**
 * Validate the body of POST /api/memory/store.
 *
 * @returns `{ ok: true, op }` carrying the normalized write op, or
 *          `{ ok: false, error }` with a caller-facing message.
 */
function validateStorePayload(body) {
    if (body === null || typeof body !== 'object') {
        return { ok: false, error: 'body must be a JSON object' };
    }
    const b = body;
    if (!isNamespace(b.namespace)) {
        return { ok: false, error: `invalid namespace (must match ${NAMESPACE_PATTERN}, ≤64 chars)` };
    }
    if (!isKey(b.key)) {
        return { ok: false, error: `invalid key (non-empty string ≤${KEY_MAX_LENGTH} chars)` };
    }
    if (b.value === undefined) {
        return { ok: false, error: 'value is required' };
    }
    if (b.tags !== undefined && !isStringArray(b.tags)) {
        return { ok: false, error: 'tags must be an array of strings' };
    }
    if (b.ttl !== undefined) {
        // Number.isFinite also rejects non-numbers, NaN, and ±Infinity.
        const ttlOk = typeof b.ttl === 'number' && Number.isFinite(b.ttl) && b.ttl > 0;
        if (!ttlOk) {
            return { ok: false, error: 'ttl must be a positive finite number (seconds)' };
        }
    }
    const op = {
        namespace: b.namespace,
        key: b.key,
        value: b.value,
        tags: b.tags,
        ttl: b.ttl,
    };
    return { ok: true, op };
}
117
/**
 * Validate the body of POST /api/memory/delete.
 *
 * @returns `{ ok: true, op: { namespace, key } }` or `{ ok: false, error }`.
 */
function validateDeletePayload(body) {
    if (body === null || typeof body !== 'object') {
        return { ok: false, error: 'body must be a JSON object' };
    }
    const b = body;
    if (!isNamespace(b.namespace)) {
        return { ok: false, error: 'invalid namespace' };
    }
    if (!isKey(b.key)) {
        return { ok: false, error: 'invalid key' };
    }
    return { ok: true, op: { namespace: b.namespace, key: b.key } };
}
127
/**
 * Validate the body of POST /api/memory/batch.
 *
 * All-or-nothing: the first invalid op fails the whole payload and the
 * failing op's position is reported via `index`.
 *
 * @returns `{ ok: true, ops }` with all ops normalized, or
 *          `{ ok: false, error, index? }`.
 */
function validateBatchPayload(body) {
    if (body === null || typeof body !== 'object') {
        return { ok: false, error: 'body must be a JSON object' };
    }
    const ops = body.ops;
    if (!Array.isArray(ops) || ops.length === 0) {
        return { ok: false, error: 'ops must be a non-empty array' };
    }
    if (ops.length > BATCH_MAX_OPS) {
        return { ok: false, error: `batch too large (max ${BATCH_MAX_OPS} ops)` };
    }
    const validated = [];
    for (const [index, raw] of ops.entries()) {
        const kind = raw?.op;
        if (kind !== 'store' && kind !== 'delete') {
            return { ok: false, error: "op must be 'store' or 'delete'", index };
        }
        const result = kind === 'store'
            ? validateStorePayload(raw)
            : validateDeletePayload(raw);
        if (!result.ok) {
            return { ok: false, error: result.error, index };
        }
        validated.push({ op: kind, ...result.op });
    }
    return { ok: true, ops: validated };
}
159
+ // ============================================================================
160
+ // JSON response helper (intentionally duplicates daemon-dashboard's helper —
161
+ // keeping this module standalone simplifies testing and avoids a circular
162
+ // dep when daemon-dashboard imports the route handlers below).
163
+ // ============================================================================
164
/** Serialize `body` and write it as an uncached JSON response with the given status. */
function sendJson(res, status, body) {
    const payload = JSON.stringify(body);
    const headers = {
        'Content-Type': 'application/json',
        'Content-Length': Buffer.byteLength(payload),
        'Cache-Control': 'no-cache',
    };
    res.writeHead(status, headers);
    res.end(payload);
}
173
/** Pass strings through unchanged; serialize everything else as JSON. */
function valueToString(value) {
    if (typeof value === 'string') {
        return value;
    }
    return JSON.stringify(value);
}
176
+ // ============================================================================
177
+ // Route handlers
178
+ // ============================================================================
179
/** Lazy import so this module stays clear of memory-initializer's heavy (and circular) dependency graph. */
async function getMemoryFns() {
    const { storeEntry, deleteEntry } = await import('../memory/memory-initializer.js');
    return { storeEntry, deleteEntry };
}
184
/**
 * POST /api/memory/store — write a single entry through the daemon's
 * authoritative sql.js path. Upsert semantics (matches the `memory_store`
 * MCP tool default — see #962 for why this is required).
 *
 * Responses: 503 when no memory accessor is attached, 400 on a bad body,
 * 500 on write/import failure, 200 with `{ ok, stored, id }` on success.
 */
export async function handleMemoryStore(req, res, memory) {
    if (!memory) {
        sendJson(res, 503, { error: 'Memory accessor not attached' });
        return;
    }
    const body = await readJsonBody(req);
    if (body === null) {
        sendJson(res, 400, { error: 'Invalid or oversized JSON body' });
        return;
    }
    const validated = validateStorePayload(body);
    if (!validated.ok) {
        sendJson(res, 400, { error: 'Invalid store request', message: validated.error });
        return;
    }
    const { namespace, key, value, tags, ttl } = validated.op;
    try {
        const { storeEntry } = await getMemoryFns();
        const result = await storeEntry({
            key,
            value: valueToString(value),
            namespace,
            tags,
            ttl,
            upsert: true,
        });
        if (result.success) {
            sendJson(res, 200, { ok: true, stored: true, id: result.id });
        }
        else {
            sendJson(res, 500, { error: 'Store failed', message: result.error ?? 'unknown' });
        }
    }
    catch (err) {
        sendJson(res, 500, { error: 'Internal error', message: errorDetail(err) });
    }
}
224
/**
 * POST /api/memory/delete — remove an entry via the daemon's authoritative
 * write path.
 *
 * Responses: 503 when no memory accessor is attached, 400 on a bad body,
 * 500 on delete/import failure, 200 with `{ ok, deleted }` on success.
 */
export async function handleMemoryDelete(req, res, memory) {
    if (!memory) {
        sendJson(res, 503, { error: 'Memory accessor not attached' });
        return;
    }
    const body = await readJsonBody(req);
    if (body === null) {
        sendJson(res, 400, { error: 'Invalid or oversized JSON body' });
        return;
    }
    const validated = validateDeletePayload(body);
    if (!validated.ok) {
        sendJson(res, 400, { error: 'Invalid delete request', message: validated.error });
        return;
    }
    const { namespace, key } = validated.op;
    try {
        const { deleteEntry } = await getMemoryFns();
        const result = await deleteEntry({ key, namespace });
        if (result.success) {
            sendJson(res, 200, { ok: true, deleted: result.deleted });
        }
        else {
            sendJson(res, 500, { error: 'Delete failed', message: result.error ?? 'unknown' });
        }
    }
    catch (err) {
        sendJson(res, 500, { error: 'Internal error', message: errorDetail(err) });
    }
}
256
/**
 * POST /api/memory/batch — apply a sequence of store/delete ops.
 *
 * Validation is all-or-nothing: any invalid op fails the request with 400
 * and no application. Application is sequential: a failure mid-stream
 * returns 207 Multi-Status with structured per-op results so the caller
 * can retry only the failures.
 *
 * True transactional atomicity isn't possible without a long-lived
 * write-handle inside the daemon (a future evolution); for now this
 * mirrors the existing `storeEntries` fallback semantics.
 */
export async function handleMemoryBatch(req, res, memory) {
    if (!memory) {
        sendJson(res, 503, { error: 'Memory accessor not attached' });
        return;
    }
    const body = await readJsonBody(req);
    if (body === null) {
        sendJson(res, 400, { error: 'Invalid or oversized JSON body' });
        return;
    }
    const v = validateBatchPayload(body);
    if (!v.ok) {
        sendJson(res, 400, { error: 'Invalid batch request', message: v.error, index: v.index });
        return;
    }
    // FIX: this await was previously outside any try/catch (unlike the
    // store/delete handlers) — a failing dynamic import sent no HTTP
    // response at all and rejected the handler's promise. Guard it and
    // answer 500 like the sibling handlers do.
    let storeEntry;
    let deleteEntry;
    try {
        ({ storeEntry, deleteEntry } = await getMemoryFns());
    }
    catch (err) {
        sendJson(res, 500, { error: 'Internal error', message: errorDetail(err) });
        return;
    }
    const results = [];
    let anyFailed = false;
    // Sequential on purpose: ops may target the same key, so order matters.
    for (const op of v.ops) {
        try {
            if (op.op === 'store') {
                const r = await storeEntry({
                    key: op.key,
                    value: valueToString(op.value),
                    namespace: op.namespace,
                    tags: op.tags,
                    ttl: op.ttl,
                    upsert: true,
                });
                if (r.success) {
                    results.push({ ok: true, id: r.id });
                }
                else {
                    results.push({ ok: false, error: r.error ?? 'unknown' });
                    anyFailed = true;
                }
            }
            else {
                const r = await deleteEntry({ key: op.key, namespace: op.namespace });
                if (r.success) {
                    results.push({ ok: true, deleted: r.deleted });
                }
                else {
                    results.push({ ok: false, error: r.error ?? 'unknown' });
                    anyFailed = true;
                }
            }
        }
        catch (err) {
            // A thrown write/delete records a per-op failure; the batch continues.
            results.push({ ok: false, error: errorDetail(err) });
            anyFailed = true;
        }
    }
    sendJson(res, anyFailed ? 207 : 200, { ok: !anyFailed, results });
}
323
/**
 * Map a request URL to a memory-RPC route name ('store' | 'delete' |
 * 'batch'), ignoring any query string. Returns null for everything else,
 * including an empty/undefined URL.
 */
export function matchMemoryRpcRoute(url) {
    if (!url) {
        return null;
    }
    const [path] = url.split('?');
    switch (path) {
        case '/api/memory/store':
            return 'store';
        case '/api/memory/delete':
            return 'delete';
        case '/api/memory/batch':
            return 'batch';
        default:
            return null;
    }
}
335
+ //# sourceMappingURL=daemon-memory-rpc.js.map
@@ -23,6 +23,16 @@ export class WriteThroughAdapter {
23
23
  attached = false;
24
24
  boundHandler;
25
25
  stats = { written: 0, errors: 0, reaped: 0 };
26
+ /**
27
+ * #981 — track in-flight fire-and-forget writes so `clearNamespace` (and
28
+ * any future shutdown waiter) can await them before listing/deleting.
29
+ * Without this, a unified message that hits the bus shortly before
30
+ * shutdown's clearNamespace() races the listEntries query — its write
31
+ * lands AFTER list and survives the shutdown's deletes. Pre-#981 this
32
+ * race was hidden by sql.js multi-process clobber; under single-writer
33
+ * routing the write deterministically persists.
34
+ */
35
+ pendingWrites = new Set();
26
36
  constructor(bus, config, storeEntry, options) {
27
37
  this.bus = bus;
28
38
  this.config = config;
@@ -77,6 +87,10 @@ export class WriteThroughAdapter {
77
87
  async clearNamespace(namespace) {
78
88
  if (!this.listEntries || !this.deleteEntry)
79
89
  return;
90
+ // #981 — drain in-flight writes before listing. Without this the list
91
+ // can return BEFORE a queued write completes; the deletes that follow
92
+ // skip that row, and it survives the shutdown.
93
+ await this.drainPendingWrites();
80
94
  try {
81
95
  const result = await this.listEntries({ namespace, limit: 1000 });
82
96
  for (const entry of result.entries) {
@@ -87,13 +101,25 @@ export class WriteThroughAdapter {
87
101
  // Best-effort cleanup
88
102
  }
89
103
  }
104
+ /**
105
+ * Wait for every fire-and-forget write currently in flight to settle.
106
+ * No-op when no writes are queued. Bounded by `Promise.allSettled` so a
107
+ * single hung write can't block forever (each individual write has its
108
+ * own daemon-write-client timeout).
109
+ */
110
+ async drainPendingWrites() {
111
+ if (this.pendingWrites.size === 0)
112
+ return;
113
+ await Promise.allSettled([...this.pendingWrites]);
114
+ }
90
115
  onUnifiedMessage(event) {
91
116
  if (!event.namespace || !this.enabledNamespaces.has(event.namespace)) {
92
117
  return;
93
118
  }
94
119
  const ttlSeconds = event.ttlMs ? Math.ceil(event.ttlMs / 1000) : undefined;
95
- // Fire-and-forget write to Memory DB
96
- this.storeEntry({
120
+ // Fire-and-forget write to Memory DB. The promise is tracked in
121
+ // `pendingWrites` so `clearNamespace`/`drainPendingWrites` can await it.
122
+ const writePromise = this.storeEntry({
97
123
  key: `msg:${event.messageId}`,
98
124
  value: JSON.stringify({
99
125
  id: event.messageId,
@@ -115,6 +141,8 @@ export class WriteThroughAdapter {
115
141
  }).catch(() => {
116
142
  this.stats.errors++;
117
143
  });
144
+ this.pendingWrites.add(writePromise);
145
+ writePromise.finally(() => { this.pendingWrites.delete(writePromise); });
118
146
  }
119
147
  /**
120
148
  * DB reaper: cleans expired entries from Memory DB.
@@ -2,5 +2,5 @@
2
2
  * Auto-generated by build. Do not edit manually.
3
3
  * Source of truth: root package.json → scripts/sync-version.mjs
4
4
  */
5
- export const VERSION = '4.9.26';
5
+ export const VERSION = '4.9.28';
6
6
  //# sourceMappingURL=version.js.map
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "moflo",
3
- "version": "4.9.26",
3
+ "version": "4.9.28",
4
4
  "description": "MoFlo — AI agent orchestration for Claude Code. A standalone, opinionated toolkit with semantic memory, learned routing, gates, spells, and the /flo issue-execution skill.",
5
5
  "main": "dist/src/cli/index.js",
6
6
  "type": "module",
@@ -84,7 +84,7 @@
84
84
  "@typescript-eslint/eslint-plugin": "^7.18.0",
85
85
  "@typescript-eslint/parser": "^7.18.0",
86
86
  "eslint": "^8.0.0",
87
- "moflo": "^4.9.25",
87
+ "moflo": "^4.9.27",
88
88
  "tsx": "^4.21.0",
89
89
  "typescript": "^5.9.3",
90
90
  "vitest": "^4.0.0"
@@ -87,8 +87,12 @@ steps:
87
87
  # elevated — bwrap network access for git pull (see single-branch create-branch).
88
88
  permissionLevel: elevated
89
89
  preflight:
90
+ # `git diff --name-only --diff-filter=U` always exits 0 — it just
91
+ # lists paths. Use `--quiet`, which exits 1 when any unmerged path
92
+ # exists, so this preflight actually catches a half-merged index
93
+ # (e.g. left by an earlier failed run).
90
94
  - name: "no unmerged files"
91
- command: "git diff --name-only --diff-filter=U"
95
+ command: "git diff --quiet --diff-filter=U"
92
96
  expectExitCode: 0
93
97
  hint: "You have unresolved merge conflicts. Resolve them and commit before running this spell."
94
98
  - name: "working tree clean (tracked changes)"
@@ -97,7 +101,7 @@ steps:
97
101
  hint: "You have uncommitted changes to tracked files. If you want them carried onto the epic branch, pick 'Stash and carry over'."
98
102
  resolutions:
99
103
  - label: "Stash changes and carry them onto the epic branch"
100
- command: "git stash push --include-untracked --message 'moflo-epic-autostash'"
104
+ command: "git stash push --include-untracked --message 'moflo-epic-{args.epic_number}-autostash'"
101
105
  - label: "Commit changes to the current branch first, then continue"
102
106
  command: "git commit -am 'wip: pre-epic snapshot'"
103
107
  - name: "working tree clean (staged changes)"
@@ -106,7 +110,7 @@ steps:
106
110
  hint: "You have staged changes that aren't committed. If you want them carried onto the epic branch, pick 'Stash and carry over'."
107
111
  resolutions:
108
112
  - label: "Stash staged changes and carry them onto the epic branch"
109
- command: "git stash push --include-untracked --message 'moflo-epic-autostash'"
113
+ command: "git stash push --include-untracked --message 'moflo-epic-{args.epic_number}-autostash'"
110
114
  - label: "Commit staged changes to the current branch first, then continue"
111
115
  command: "git commit -m 'wip: pre-epic snapshot'"
112
116
  - name: "gh cli authenticated"
@@ -116,9 +120,28 @@ steps:
116
120
  command: "git remote get-url origin"
117
121
  hint: "This repo has no 'origin' remote. Set one with: git remote add origin <url>"
118
122
  config:
119
- # set -e: fail fast if checkout/pull fails; otherwise the trailing
120
- # `stash pop ... || true` would mask the real failure.
121
- command: "set -e; git stash --include-untracked -q 2>/dev/null || true; git checkout {args.base_branch}; git pull origin {args.base_branch}; git stash pop -q 2>/dev/null || true"
123
+ # set -e: fail fast if checkout/pull fails.
124
+ #
125
+ # We deliberately do NOT auto-stash here. Preflight enforces a clean
126
+ # tree (or runs the user-chosen stash-and-carry resolution), so the
127
+ # only stash we should ever pop is the preflight's
128
+ # `moflo-epic-{args.epic_number}-autostash` marker. Popping the unconditionally-top
129
+ # stash (the previous design) was the source of #287's stash-pop
130
+ # conflict that left the index half-merged.
131
+ command: |
132
+ set -e
133
+ git checkout {args.base_branch}
134
+ git pull origin {args.base_branch}
135
+ # Scoped by epic_number to prevent a stale autostash from an
136
+ # unrelated abandoned run getting popped here.
137
+ EXPECTED_STASH_TAG="moflo-epic-{args.epic_number}-autostash"
138
+ TOP_MSG=$(git stash list --format='%s' -1)
139
+ if [[ "$TOP_MSG" == *"$EXPECTED_STASH_TAG"* ]]; then
140
+ if ! git stash pop -q; then
141
+ echo "[epic] carrying-over stash conflicted on {args.base_branch} — resolve unmerged paths and re-run the spell" >&2
142
+ exit 1
143
+ fi
144
+ fi
122
145
  failOnError: true
123
146
 
124
147
  # 2: Spawn Claude agent to implement the story (creates branch + PR)
@@ -155,7 +178,20 @@ steps:
155
178
  permissionLevel: elevated
156
179
  config:
157
180
  # set -e: fail fast if checkout/pull fails (see checkout-base).
158
- command: "set -e; git stash --include-untracked -q 2>/dev/null || true; git checkout {args.base_branch}; git pull origin {args.base_branch}; git stash pop -q 2>/dev/null || true"
181
+ # No in-step stash; pop only the preflight's autostash marker
182
+ # (scoped by epic_number — see checkout-base).
183
+ command: |
184
+ set -e
185
+ git checkout {args.base_branch}
186
+ git pull origin {args.base_branch}
187
+ EXPECTED_STASH_TAG="moflo-epic-{args.epic_number}-autostash"
188
+ TOP_MSG=$(git stash list --format='%s' -1)
189
+ if [[ "$TOP_MSG" == *"$EXPECTED_STASH_TAG"* ]]; then
190
+ if ! git stash pop -q; then
191
+ echo "[epic] carrying-over stash conflicted on {args.base_branch} — resolve unmerged paths and re-run the spell" >&2
192
+ exit 1
193
+ fi
194
+ fi
159
195
  failOnError: true
160
196
 
161
197
  # 6: Comment on epic with progress
@@ -87,8 +87,12 @@ steps:
87
87
  # has working network.
88
88
  permissionLevel: elevated
89
89
  preflight:
90
+ # `git diff --name-only --diff-filter=U` always exits 0 — it just lists
91
+ # paths. Use `--quiet`, which exits 1 when any unmerged path exists, so
92
+ # the preflight actually catches a half-merged index left by an earlier
93
+ # failed run (e.g. a stash-pop conflict).
90
94
  - name: "no unmerged files"
91
- command: "git diff --name-only --diff-filter=U"
95
+ command: "git diff --quiet --diff-filter=U"
92
96
  expectExitCode: 0
93
97
  hint: "You have unresolved merge conflicts. Resolve them and commit before running this spell."
94
98
  - name: "working tree clean (tracked changes)"
@@ -97,7 +101,7 @@ steps:
97
101
  hint: "You have uncommitted changes to tracked files. If you want them carried onto the epic branch, pick 'Stash and carry over'."
98
102
  resolutions:
99
103
  - label: "Stash changes and carry them onto the epic branch"
100
- command: "git stash push --include-untracked --message 'moflo-epic-autostash'"
104
+ command: "git stash push --include-untracked --message 'moflo-epic-{args.epic_number}-autostash'"
101
105
  - label: "Commit changes to the current branch first, then continue"
102
106
  command: "git commit -am 'wip: pre-epic snapshot'"
103
107
  - name: "working tree clean (staged changes)"
@@ -106,18 +110,45 @@ steps:
106
110
  hint: "You have staged changes that aren't committed. If you want them carried onto the epic branch, pick 'Stash and carry over'."
107
111
  resolutions:
108
112
  - label: "Stash staged changes and carry them onto the epic branch"
109
- command: "git stash push --include-untracked --message 'moflo-epic-autostash'"
113
+ command: "git stash push --include-untracked --message 'moflo-epic-{args.epic_number}-autostash'"
110
114
  - label: "Commit staged changes to the current branch first, then continue"
111
115
  command: "git commit -m 'wip: pre-epic snapshot'"
112
116
  - name: "gh cli authenticated"
113
117
  command: "gh auth status"
114
118
  hint: "The GitHub CLI isn't signed in. Run: gh auth login"
115
119
  config:
116
- # set -e: any failing step aborts the whole command. Without it, the
117
- # trailing `git stash pop ... || true` would swallow real failures
118
- # (e.g., checkout/pull/branch-create errors) and report success,
119
- # leaving later steps to crash when the epic branch isn't there.
120
- command: "set -e; git stash --include-untracked -q 2>/dev/null || true; git checkout {args.base_branch}; git pull origin {args.base_branch}; BRANCH=\"epic/{args.epic_number}-{args.epic_slug}\"; if git show-ref --verify --quiet \"refs/heads/$BRANCH\"; then git checkout \"$BRANCH\"; else git checkout -b \"$BRANCH\"; fi; git stash pop -q 2>/dev/null || true"
120
+ # set -e: any failing step aborts the whole command.
121
+ #
122
+ # We deliberately do NOT auto-stash here. Preflight has already enforced
123
+ # a clean tree (or run the user-chosen stash-and-carry resolution), so
124
+ # the only stash we should ever pop is the preflight's `moflo-epic-autostash`
125
+ # marker. Popping the unconditionally-top stash (the previous design)
126
+ # was the source of #287's stash-pop conflict that left the index
127
+ # half-merged — `git stash pop ... || true` masked the conflict
128
+ # failure and the next step's `git checkout` then died with
129
+ # "you need to resolve your current index first".
130
+ command: |
131
+ set -e
132
+ git checkout {args.base_branch}
133
+ git pull origin {args.base_branch}
134
+ BRANCH="epic/{args.epic_number}-{args.epic_slug}"
135
+ if git show-ref --verify --quiet "refs/heads/$BRANCH"; then
136
+ git checkout "$BRANCH"
137
+ else
138
+ git checkout -b "$BRANCH"
139
+ fi
140
+ # Scope the marker by epic_number so a stale autostash from an
141
+ # abandoned previous run of a DIFFERENT epic doesn't get popped onto
142
+ # this branch unintentionally. Same-epic re-runs still carry over,
143
+ # which is the intended recovery path.
144
+ EXPECTED_STASH_TAG="moflo-epic-{args.epic_number}-autostash"
145
+ TOP_MSG=$(git stash list --format='%s' -1)
146
+ if [[ "$TOP_MSG" == *"$EXPECTED_STASH_TAG"* ]]; then
147
+ if ! git stash pop -q; then
148
+ echo "[epic] carrying-over stash conflicted on $BRANCH — resolve unmerged paths and re-run the spell" >&2
149
+ exit 1
150
+ fi
151
+ fi
121
152
  timeout: 120000
122
153
  failOnError: true
123
154