@lhi/n8m 0.2.4 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -104,7 +104,7 @@ export class AIService {
104
104
  },
105
105
  body: JSON.stringify({
106
106
  model,
107
- max_tokens: 4096,
107
+ max_tokens: options.maxTokens ?? 4096,
108
108
  messages: [{ role: 'user', content: prompt }],
109
109
  temperature: options.temperature ?? 0.7,
110
110
  })
@@ -205,6 +205,39 @@ export class AIService {
205
205
  throw new Error(`invalid JSON returned by AI: ${cleanJson}`);
206
206
  }
207
207
  }
208
+ async chatAboutSpec(spec, history, userMessage) {
209
+ const conversationText = history
210
+ .map(h => `${h.role === 'user' ? 'User' : 'Architect'}: ${h.content}`)
211
+ .join('\n');
212
+ const prompt = `You are an n8n Workflow Architect having a planning conversation with the user.
213
+
214
+ Current Workflow Spec:
215
+ ${JSON.stringify(spec, null, 2)}
216
+
217
+ ${conversationText ? `Conversation so far:\n${conversationText}\n` : ''}User: ${userMessage}
218
+
219
+ Respond conversationally to help the user understand or refine the plan. If the user requests changes to the workflow approach, update the spec accordingly.
220
+
221
+ Output a JSON object:
222
+ {
223
+ "reply": "Your conversational response here",
224
+ "updatedSpec": { /* full spec JSON — same structure as input, with any requested changes applied */ }
225
+ }
226
+
227
+ Output ONLY valid JSON. No markdown.`;
228
+ const response = await this.generateContent(prompt);
229
+ const cleanJson = response.replace(/```json\n?|\n?```/g, '').trim();
230
+ try {
231
+ const result = JSON.parse(jsonrepair(cleanJson));
232
+ return {
233
+ reply: result.reply || '',
234
+ updatedSpec: result.updatedSpec || spec,
235
+ };
236
+ }
237
+ catch {
238
+ return { reply: response, updatedSpec: spec };
239
+ }
240
+ }
208
241
  async generateWorkflow(goal) {
209
242
  const prompt = `You are an n8n Expert.
210
243
  Generate a valid n8n workflow JSON for the following goal: "${goal}".
@@ -244,6 +277,196 @@ export class AIService {
244
277
  return { ...primarySpec, suggestedName: primarySpec.suggestedName + " (Alt)", strategyName: 'alternative' };
245
278
  }
246
279
  }
280
+ async generateModificationPlan(instruction, workflowJson) {
281
+ const nodeList = (workflowJson.nodes || [])
282
+ .map((n) => `${n.name} (${n.type})`)
283
+ .join(', ');
284
+ const prompt = `You are an n8n Solution Architect reviewing a workflow modification request.
285
+
286
+ Workflow: "${workflowJson.name || 'Untitled'}"
287
+ Current nodes: ${nodeList}
288
+
289
+ Modification requested: "${instruction}"
290
+
291
+ Analyze the request and produce a concise modification plan as a JSON object:
292
+ {
293
+ "suggestedName": "Updated workflow name (or same if unchanged)",
294
+ "description": "One-sentence summary of what this modification achieves",
295
+ "proposedChanges": ["Specific change 1", "Specific change 2"],
296
+ "affectedNodes": ["Node names that will be added, modified, or removed"]
297
+ }
298
+ Output ONLY the JSON object. No commentary.`;
299
+ const response = await this.generateContent(prompt);
300
+ const cleanJson = response.replace(/```json\n?|\n?```/g, '').trim();
301
+ try {
302
+ return JSON.parse(jsonrepair(cleanJson));
303
+ }
304
+ catch {
305
+ return {
306
+ suggestedName: workflowJson.name || 'Modified Workflow',
307
+ description: instruction,
308
+ proposedChanges: [instruction],
309
+ affectedNodes: [],
310
+ };
311
+ }
312
+ }
313
+ async applyModification(workflowJson, userGoal, spec, userFeedback, validNodeTypes = []) {
314
+ const nodeService = NodeDefinitionsService.getInstance();
315
+ const staticRef = nodeService.getStaticReference();
316
+ const prompt = `You are an n8n Workflow Engineer. Modify the following existing workflow.
317
+
318
+ ORIGINAL WORKFLOW:
319
+ ${JSON.stringify(workflowJson, null, 2)}
320
+
321
+ MODIFICATION INSTRUCTION:
322
+ ${userGoal}
323
+
324
+ MODIFICATION PLAN:
325
+ ${JSON.stringify(spec, null, 2)}
326
+ ${userFeedback ? `\nUSER FEEDBACK:\n${userFeedback}\n` : ''}
327
+ [N8N NODE REFERENCE GUIDE]
328
+ ${staticRef}
329
+
330
+ ${validNodeTypes.length > 0 ? `Valid node types: ${validNodeTypes.slice(0, 100).join(', ')}` : ''}
331
+
332
+ Apply ALL proposed changes. Then verify the following before outputting:
333
+
334
+ CONNECTION RULES — EVERY node must appear in the connections object:
335
+ 1. Every non-trigger node must have at least one incoming connection from another node.
336
+ 2. Every node that is not a terminal/sink must have at least one outgoing connection.
337
+ 3. New nodes inserted into the middle of the flow must be wired into BOTH the incoming edge (from the predecessor) AND the outgoing edge (to the successor) — do not leave gaps in the chain.
338
+ 4. If the original workflow had error output connections (e.g. "error" branch), replicate that pattern for any new nodes that have onError: "continueErrorOutput".
339
+ 5. The connections object keys are SOURCE node names; the "node" field inside is the TARGET node name. Double-check every name matches exactly.
340
+
341
+ Preserve all existing nodes, connections, credentials, and IDs not mentioned in the plan. Add new nodes with unique string IDs.
342
+
343
+ Output ONLY the complete workflow JSON object. No commentary. No markdown.`;
344
+ const response = await this.generateContent(prompt, { maxTokens: 8192 });
345
+ const cleanJson = response.replace(/```json\n?|\n?```/g, '').trim();
346
+ try {
347
+ const result = JSON.parse(jsonrepair(cleanJson));
348
+ const modified = result.workflows?.[0] || result;
349
+ return this.wireOrphanedErrorHandlers(this.fixHallucinatedNodes(this.repairConnections(modified, workflowJson)));
350
+ }
351
+ catch (e) {
352
+ console.error('Failed to parse modified workflow JSON', e);
353
+ return workflowJson;
354
+ }
355
+ }
356
+ /**
357
+ * Merge connections from the original workflow into the modified one for any
358
+ * nodes that exist in both but lost their connections during LLM generation.
359
+ * Then does a position-based stitch for any remaining nodes with no outgoing
360
+ * main connection, using canvas x/y position to infer the intended chain order.
361
+ */
362
+ repairConnections(modified, original) {
363
+ if (!modified?.nodes || !modified?.connections)
364
+ return modified;
365
+ const connections = { ...(modified.connections || {}) };
366
+ const origConnections = original?.connections || {};
367
+ const nodeNames = new Set(modified.nodes.map((n) => n.name));
368
+ // 1. Restore original connections for nodes that exist in both but lost theirs.
369
+ // Operates per output-type so that nodes with partial connections (e.g. LLM
370
+ // generated "main" but dropped "error") still get their missing types restored.
371
+ for (const [srcName, srcConn] of Object.entries(origConnections)) {
372
+ if (!nodeNames.has(srcName))
373
+ continue;
374
+ const existingConn = connections[srcName] || {};
375
+ let changed = false;
376
+ const merged = { ...existingConn };
377
+ for (const [outputType, branches] of Object.entries(srcConn)) {
378
+ if (existingConn[outputType])
379
+ continue; // this output type already present — keep LLM version
380
+ const filteredBranches = branches.map((branch) => branch.filter((edge) => nodeNames.has(edge.node))).filter((branch) => branch.length > 0);
381
+ if (filteredBranches.length > 0) {
382
+ merged[outputType] = filteredBranches;
383
+ changed = true;
384
+ }
385
+ }
386
+ if (changed)
387
+ connections[srcName] = merged;
388
+ }
389
+ // 2. Position-based chain stitching for nodes still missing outgoing main connections.
390
+ // Group nodes by approximate y-row (round to nearest 300px), sort each row by x.
391
+ // For any node with no outgoing main connection, wire it to the next node in its row.
392
+ const nodes = modified.nodes;
393
+ // 1b. Restore onError settings that the LLM may have stripped from nodes.
394
+ const origNodeMap = new Map((original?.nodes ?? []).map((n) => [n.name, n]));
395
+ for (const node of nodes) {
396
+ const orig = origNodeMap.get(node.name);
397
+ if (orig?.onError && !node.onError) {
398
+ node.onError = orig.onError;
399
+ }
400
+ }
401
+ // 1c. Wire error connections for any node with onError:"continueErrorOutput" that lacks one.
402
+ // Covers new nodes the LLM added to the flow (not present in original) — step 1 can't restore
403
+ // connections for those. Infer the error handler from what the original connected to.
404
+ const errorHandlerNodes = new Set();
405
+ for (const srcConn of Object.values(origConnections)) {
406
+ for (const branch of (srcConn.error ?? [])) {
407
+ for (const edge of branch) {
408
+ if (nodeNames.has(edge.node))
409
+ errorHandlerNodes.add(edge.node);
410
+ }
411
+ }
412
+ }
413
+ if (errorHandlerNodes.size > 0) {
414
+ const errorHandler = [...errorHandlerNodes][0];
415
+ for (const node of nodes) {
416
+ if (node.onError !== 'continueErrorOutput')
417
+ continue;
418
+ if (connections[node.name]?.error?.length > 0)
419
+ continue;
420
+ connections[node.name] = {
421
+ ...(connections[node.name] || {}),
422
+ error: [[{ node: errorHandler, type: 'main', index: 0 }]],
423
+ };
424
+ }
425
+ }
426
+ // 1d. Remove LLM-hallucinated main connections from nodes that were terminal in the original.
427
+ // A node is terminal if it existed in the original but had no outgoing main connections there.
428
+ const origNodeNames = new Set((original?.nodes ?? []).map((n) => n.name));
429
+ for (const node of nodes) {
430
+ if (!origNodeNames.has(node.name))
431
+ continue; // new node — leave LLM connections alone
432
+ const origConn = origConnections[node.name];
433
+ const hadMain = origConn?.main?.some((b) => b.length > 0);
434
+ if (!hadMain && connections[node.name]?.main?.length > 0) {
435
+ const nc = { ...(connections[node.name] || {}) };
436
+ delete nc.main;
437
+ if (Object.keys(nc).length > 0) {
438
+ connections[node.name] = nc;
439
+ }
440
+ else {
441
+ delete connections[node.name];
442
+ }
443
+ }
444
+ }
445
+ const rowMap = new Map();
446
+ for (const node of nodes) {
447
+ const x = node.position?.[0] ?? 0;
448
+ const y = node.position?.[1] ?? 0;
449
+ const rowKey = Math.round(y / 300) * 300;
450
+ if (!rowMap.has(rowKey))
451
+ rowMap.set(rowKey, []);
452
+ rowMap.get(rowKey).push({ ...node, _x: x });
453
+ }
454
+ for (const row of rowMap.values()) {
455
+ row.sort((a, b) => a._x - b._x);
456
+ for (let i = 0; i < row.length - 1; i++) {
457
+ const src = row[i];
458
+ const tgt = row[i + 1];
459
+ // Only stitch if this node has NO outgoing main connections yet
460
+ if (connections[src.name]?.main?.length > 0)
461
+ continue;
462
+ connections[src.name] = {
463
+ ...(connections[src.name] || {}),
464
+ main: [[{ node: tgt.name, type: 'main', index: 0 }]],
465
+ };
466
+ }
467
+ }
468
+ return { ...modified, connections };
469
+ }
247
470
  async generateWorkflowFix(workflow, error, model, _stream = false, validNodeTypes = []) {
248
471
  const nodeService = NodeDefinitionsService.getInstance();
249
472
  const staticRef = nodeService.getStaticReference();
@@ -265,7 +488,8 @@ export class AIService {
265
488
  const cleanJson = response.replace(/```json\n?|\n?```/g, "").trim();
266
489
  try {
267
490
  const fixed = JSON.parse(jsonrepair(cleanJson));
268
- return fixed.workflows?.[0] || fixed;
491
+ const result = fixed.workflows?.[0] || fixed;
492
+ return this.wireOrphanedErrorHandlers(this.fixHallucinatedNodes(this.repairConnections(result, workflow)));
269
493
  }
270
494
  catch (e) {
271
495
  console.error("Failed to parse fix JSON", e);
@@ -329,6 +553,60 @@ export class AIService {
329
553
  });
330
554
  return this.fixN8nConnections(workflow);
331
555
  }
556
+ /**
557
+ * Wire orphaned error-handler nodes that the LLM created but forgot to connect.
558
+ * Detects nodes with no incoming connections whose name suggests they are error
559
+ * handlers (contains "Error", "Cleanup", "Rollback", "Fallback", etc.) and wires
560
+ * every non-terminal, non-handler node's error output to them.
561
+ * Also sets onError:"continueErrorOutput" on each wired source node.
562
+ */
563
+ wireOrphanedErrorHandlers(workflow) {
564
+ if (!workflow?.nodes || !workflow?.connections)
565
+ return workflow;
566
+ const nodes = workflow.nodes;
567
+ const connections = { ...(workflow.connections || {}) };
568
+ // Build set of nodes that have at least one incoming connection.
569
+ const hasIncoming = new Set();
570
+ for (const srcConn of Object.values(connections)) {
571
+ for (const branches of Object.values(srcConn)) {
572
+ for (const branch of branches) {
573
+ for (const edge of branch) {
574
+ if (edge?.node)
575
+ hasIncoming.add(edge.node);
576
+ }
577
+ }
578
+ }
579
+ }
580
+ const TRIGGER_TYPES = /trigger|webhook|cron|schedule|interval|timer|poller|gmail|rss/i;
581
+ const ERROR_HANDLER_PATTERN = /error|cleanup|rollback|fallback|on.?fail|recover/i;
582
+ // Orphaned nodes = no incoming connection, not a trigger, name looks like an error handler.
583
+ const errorHandlers = nodes.filter(n => !hasIncoming.has(n.name) &&
584
+ !TRIGGER_TYPES.test(n.type || '') &&
585
+ ERROR_HANDLER_PATTERN.test(n.name));
586
+ if (errorHandlers.length === 0)
587
+ return workflow;
588
+ // Non-terminal nodes = have at least one outgoing main connection.
589
+ const nonTerminal = new Set();
590
+ for (const [srcName, srcConn] of Object.entries(connections)) {
591
+ const mainBranches = srcConn.main;
592
+ if (mainBranches?.some((b) => b.length > 0)) {
593
+ nonTerminal.add(srcName);
594
+ }
595
+ }
596
+ for (const handler of errorHandlers) {
597
+ const sources = nodes.filter(n => nonTerminal.has(n.name) &&
598
+ !ERROR_HANDLER_PATTERN.test(n.name) &&
599
+ !(connections[n.name]?.error?.length > 0));
600
+ for (const src of sources) {
601
+ src.onError = 'continueErrorOutput';
602
+ connections[src.name] = {
603
+ ...(connections[src.name] || {}),
604
+ error: [[{ node: handler.name, type: 'main', index: 0 }]],
605
+ };
606
+ }
607
+ }
608
+ return { ...workflow, connections };
609
+ }
332
610
  fixN8nConnections(workflow) {
333
611
  if (!workflow.connections || typeof workflow.connections !== 'object')
334
612
  return workflow;
@@ -447,6 +725,63 @@ Return ONLY the replacement JavaScript code. No markdown fences, no explanation.
447
725
  const response = await this.generateContent(prompt, { temperature: 0.1 });
448
726
  return response.replace(/^```(?:javascript|js)?\n?|\n?```$/g, '').trim();
449
727
  }
728
+ /**
729
+ * Analyze a validated working workflow and generate a reusable pattern file.
730
+ * Returns markdown content ready to save to docs/patterns/.
731
+ */
732
+ async generatePattern(workflowJson) {
733
+ const stripped = {
734
+ name: workflowJson.name,
735
+ nodes: (workflowJson.nodes || []).map((n) => ({
736
+ name: n.name,
737
+ type: n.type,
738
+ typeVersion: n.typeVersion,
739
+ parameters: n.parameters,
740
+ })),
741
+ connections: workflowJson.connections,
742
+ };
743
+ const prompt = `You are an n8n workflow expert analyzing a VALIDATED, WORKING n8n workflow.
744
+ Your job is to extract the reusable knowledge from this workflow into a pattern file that will teach an AI engineer to build similar workflows correctly.
745
+
746
+ Workflow JSON:
747
+ ${JSON.stringify(stripped, null, 2)}
748
+
749
+ Generate a markdown pattern file with the following structure:
750
+
751
+ 1. First line MUST be: <!-- keywords: <comma-separated keywords> -->
752
+ - Keywords should cover: service names, operations, node types, integration categories
753
+ - Example: <!-- keywords: bigquery, google bigquery, sql, merge, staging, http request -->
754
+
755
+ 2. A short title: # Pattern: <descriptive title>
756
+
757
+ 3. ## Critical Rules
758
+ - List any gotchas, wrong approaches to avoid, or non-obvious choices made in this workflow
759
+ - Be specific: e.g. "Use n8n-nodes-base.httpRequest instead of n8n-nodes-base.googleBigQuery because..."
760
+ - If there are no critical rules, omit this section
761
+
762
+ 4. ## Authentication
763
+ - Document the credential type and any required scopes/permissions
764
+ - Only include if the workflow uses credentials
765
+
766
+ 5. One section per major technique demonstrated, e.g.:
767
+ ## <Technique Name>
768
+ - Explain what it does and why
769
+ - Include the relevant node config as a JSON code block (use actual values from the workflow, anonymize project IDs to YOUR_PROJECT etc.)
770
+ - Note any important parameter choices
771
+
772
+ 6. ## Error Handling (if the workflow has error paths)
773
+ - Explain the error handling strategy
774
+
775
+ Keep the pattern focused and actionable. An AI reading this should be able to reproduce the technique correctly.
776
+ Output ONLY the markdown content. No commentary before or after.`;
777
+ const content = await this.generateContent(prompt, { temperature: 0.3 });
778
+ // Derive a filename slug from the workflow name
779
+ const slug = (workflowJson.name || 'workflow')
780
+ .toLowerCase()
781
+ .replace(/[^a-z0-9]+/g, '-')
782
+ .replace(/^-|-$/g, '');
783
+ return { content, slug };
784
+ }
450
785
  async evaluateCandidates(goal, candidates) {
451
786
  if (candidates.length === 0)
452
787
  return { selectedIndex: 0, reason: "No candidates" };
@@ -6,6 +6,8 @@ export interface ReducedNodeDefinition {
6
6
  }
7
7
  export declare class NodeDefinitionsService {
8
8
  private static instance;
9
+ /** Override candidate dirs for testing only. When set, replaces the default dir list in searchPatterns(). */
10
+ static _testPatternsDirs: string[] | null;
9
11
  private definitions;
10
12
  private client;
11
13
  private defaultClient;
@@ -35,6 +37,12 @@ export declare class NodeDefinitionsService {
35
37
  * We keep properties (parameters) but strip UI metadata.
36
38
  */
37
39
  private reduceDefinition;
40
+ /**
41
+ * Search pattern library for examples matching the query.
42
+ * Patterns are markdown files in docs/patterns/ with a keywords frontmatter line.
43
+ * Format: <!-- keywords: bigquery, http, google -->
44
+ */
45
+ searchPatterns(query: string): string[];
38
46
  /**
39
47
  * Format definitions for LLM System Prompt
40
48
  */
@@ -7,6 +7,8 @@ const __filename = fileURLToPath(import.meta.url);
7
7
  const __dirname = path.dirname(__filename);
8
8
  export class NodeDefinitionsService {
9
9
  static instance;
10
+ /** Override candidate dirs for testing only. When set, replaces the default dir list in searchPatterns(). */
11
+ static _testPatternsDirs = null;
10
12
  definitions = [];
11
13
  client;
12
14
  defaultClient;
@@ -137,6 +139,49 @@ export class NodeDefinitionsService {
137
139
  }))
138
140
  };
139
141
  }
142
+ /**
143
+ * Search pattern library for examples matching the query.
144
+ * Patterns are markdown files in docs/patterns/ with a keywords frontmatter line.
145
+ * Format: <!-- keywords: bigquery, http, google -->
146
+ */
147
+ searchPatterns(query) {
148
+ const lowerQuery = query.toLowerCase();
149
+ const terms = lowerQuery.split(/\s+/).filter(t => t.length > 2);
150
+ if (terms.length === 0)
151
+ return [];
152
+ // Search order: user's .n8m/patterns first, then built-in docs/patterns
153
+ const candidateDirs = NodeDefinitionsService._testPatternsDirs ?? [
154
+ path.join(process.cwd(), '.n8m', 'patterns'),
155
+ path.join(__dirname, '..', '..', 'docs', 'patterns'), // dist
156
+ path.join(__dirname, '..', '..', '..', 'docs', 'patterns'), // src (dev)
157
+ ];
158
+ const matched = [];
159
+ const seen = new Set(); // deduplicate by filename
160
+ for (const dir of candidateDirs) {
161
+ if (!fs.existsSync(dir))
162
+ continue;
163
+ try {
164
+ const files = fs.readdirSync(dir).filter(f => f.endsWith('.md'));
165
+ for (const file of files) {
166
+ if (seen.has(file))
167
+ continue; // user pattern overrides built-in of same name
168
+ const content = fs.readFileSync(path.join(dir, file), 'utf-8');
169
+ const keywordsMatch = content.match(/<!--\s*keywords:\s*([^\-]+)-->/i);
170
+ if (!keywordsMatch)
171
+ continue;
172
+ seen.add(file);
173
+ const fileKeywords = keywordsMatch[1].toLowerCase();
174
+ if (terms.some(t => fileKeywords.includes(t))) {
175
+ matched.push(content);
176
+ }
177
+ }
178
+ }
179
+ catch {
180
+ // silently skip unreadable dirs
181
+ }
182
+ }
183
+ return matched;
184
+ }
140
185
  /**
141
186
  * Format definitions for LLM System Prompt
142
187
  */
@@ -3,6 +3,10 @@ export interface WorkflowFixture {
3
3
  capturedAt: string;
4
4
  workflowId: string;
5
5
  workflowName: string;
6
+ /** Human-readable label for this test case */
7
+ description?: string;
8
+ /** Whether this case should pass or fail. Defaults to 'pass'. */
9
+ expectedOutcome?: 'pass' | 'fail';
6
10
  workflow: any;
7
11
  execution: {
8
12
  id?: string;
@@ -20,9 +24,15 @@ export declare class FixtureManager {
20
24
  private fixturesDir;
21
25
  constructor();
22
26
  private fixturePath;
27
+ private fixtureDir;
23
28
  exists(workflowId: string): boolean;
29
+ /** Load all fixtures for a workflow. Supports both directory (new) and single-file (legacy) formats. */
30
+ loadAll(workflowId: string): WorkflowFixture[];
24
31
  load(workflowId: string): WorkflowFixture | null;
25
32
  loadFromPath(filePath: string): WorkflowFixture | null;
26
33
  getCapturedDate(workflowId: string): Date | null;
34
+ /** Save a named fixture into the per-workflow directory (new multi-fixture format). */
35
+ saveNamed(fixture: WorkflowFixture, name: string): Promise<void>;
36
+ /** Legacy single-file save (used by offerSaveFixture after live runs). */
27
37
  save(fixture: WorkflowFixture): Promise<void>;
28
38
  }
@@ -1,5 +1,5 @@
1
1
  import fs from 'fs/promises';
2
- import { existsSync, readFileSync } from 'fs';
2
+ import { existsSync, readFileSync, readdirSync } from 'fs';
3
3
  import path from 'path';
4
4
  export class FixtureManager {
5
5
  fixturesDir;
@@ -9,8 +9,32 @@ export class FixtureManager {
9
9
  fixturePath(workflowId) {
10
10
  return path.join(this.fixturesDir, `${workflowId}.json`);
11
11
  }
12
+ fixtureDir(workflowId) {
13
+ return path.join(this.fixturesDir, workflowId);
14
+ }
12
15
  exists(workflowId) {
13
- return existsSync(this.fixturePath(workflowId));
16
+ return existsSync(this.fixturePath(workflowId)) || existsSync(this.fixtureDir(workflowId));
17
+ }
18
+ /** Load all fixtures for a workflow. Supports both directory (new) and single-file (legacy) formats. */
19
+ loadAll(workflowId) {
20
+ const dir = this.fixtureDir(workflowId);
21
+ if (existsSync(dir)) {
22
+ return readdirSync(dir)
23
+ .filter(f => f.endsWith('.json'))
24
+ .sort()
25
+ .flatMap(f => {
26
+ try {
27
+ const raw = readFileSync(path.join(dir, f), 'utf-8');
28
+ return [JSON.parse(raw)];
29
+ }
30
+ catch {
31
+ return [];
32
+ }
33
+ });
34
+ }
35
+ // Legacy single-file fallback
36
+ const single = this.load(workflowId);
37
+ return single ? [single] : [];
14
38
  }
15
39
  load(workflowId) {
16
40
  try {
@@ -31,9 +55,24 @@ export class FixtureManager {
31
55
  }
32
56
  }
33
57
  getCapturedDate(workflowId) {
34
- const fixture = this.load(workflowId);
35
- return fixture ? new Date(fixture.capturedAt) : null;
58
+ const fixtures = this.loadAll(workflowId);
59
+ if (fixtures.length === 0)
60
+ return null;
61
+ const dates = fixtures
62
+ .map(f => new Date(f.capturedAt))
63
+ .filter(d => !isNaN(d.getTime()));
64
+ if (dates.length === 0)
65
+ return null;
66
+ return dates.reduce((latest, d) => (d > latest ? d : latest));
67
+ }
68
+ /** Save a named fixture into the per-workflow directory (new multi-fixture format). */
69
+ async saveNamed(fixture, name) {
70
+ const dir = this.fixtureDir(fixture.workflowId);
71
+ await fs.mkdir(dir, { recursive: true });
72
+ const safeName = name.replace(/[^a-z0-9_-]/gi, '-').replace(/-+/g, '-').toLowerCase();
73
+ await fs.writeFile(path.join(dir, `${safeName}.json`), JSON.stringify(fixture, null, 2), 'utf-8');
36
74
  }
75
+ /** Legacy single-file save (used by offerSaveFixture after live runs). */
37
76
  async save(fixture) {
38
77
  await fs.mkdir(this.fixturesDir, { recursive: true });
39
78
  await fs.writeFile(this.fixturePath(fixture.workflowId), JSON.stringify(fixture, null, 2), 'utf-8');
@@ -1,52 +1,38 @@
1
- import { jsx as _jsx, jsxs as _jsxs } from "react/jsx-runtime";
2
- import { useState } from 'react';
3
- import { Box, Text, render, useApp } from 'ink';
4
- import TextInput from 'ink-text-input';
5
- import { MultilineInput } from 'ink-multiline-input';
6
- const SmartPromptElement = ({ onDone, title }) => {
7
- const [mode, setMode] = useState('single');
8
- const [value, setValue] = useState('');
9
- const [multiValue, setMultiValue] = useState('');
10
- const { exit } = useApp();
11
- const handleSingleSubmit = (text) => {
12
- if (text.trim() === '```') {
13
- setMode('multi');
14
- }
15
- else if (text.trim().length > 0) {
16
- onDone(text.trim());
17
- exit();
18
- }
19
- // If empty, do nothing (stays open)
20
- };
21
- const handleMultiSubmit = (text) => {
22
- // End with ``` to submit
23
- if (text.trim().endsWith('```')) {
24
- const finalValue = text.trim();
25
- const cleaned = finalValue.slice(0, -3).trim();
26
- if (cleaned.length > 0) {
27
- onDone(cleaned);
28
- exit();
29
- return;
30
- }
31
- }
32
- setMultiValue(text + '\n');
33
- };
34
- if (mode === 'single') {
35
- return (_jsxs(Box, { children: [_jsx(Text, { color: "green", children: "? " }), _jsxs(Text, { bold: true, children: [title || 'Describe the workflow (use ``` for multiline): ', " "] }), _jsx(TextInput, { value: value, onChange: setValue, onSubmit: handleSingleSubmit })] }));
36
- }
37
- return (_jsxs(Box, { flexDirection: "column", paddingX: 1, marginBottom: 1, children: [_jsxs(Box, { children: [_jsx(Text, { color: "green", children: "\u2714 " }), _jsxs(Text, { bold: true, children: [title || 'Describe the workflow (use ``` for multiline): ', " "] }), _jsx(Text, { color: "cyan", children: "```" })] }), _jsx(Box, { marginTop: 1, children: _jsx(Text, { color: "cyan", children: "Entering multiline mode. Type ``` on a new line to finish." }) }), _jsxs(Box, { flexDirection: "row", marginTop: 1, children: [_jsx(Text, { color: "gray", children: "\u2503 " }), _jsx(Box, { flexGrow: 1, children: _jsx(MultilineInput, { value: multiValue, onChange: setMultiValue, onSubmit: handleMultiSubmit, rows: 5, maxRows: 15, keyBindings: {
38
- submit: (key) => key.return && !key.shift,
39
- newline: (key) => key.return && key.shift
40
- } }) })] }), _jsx(Box, { marginTop: 1, children: _jsx(Text, { color: "gray", dimColor: true, children: "Arrows: Navigate | Enter: Submit (if ends with ```) | Shift+Enter: Newline" }) })] }));
41
- };
1
+ import * as readline from 'node:readline';
42
2
/**
 * Prompt the user on stdin for a workflow description.
 * A bare line submits immediately; typing ``` switches to multiline mode,
 * which collects lines until a closing ``` on its own line.
 *
 * Fix: resolve (with whatever was collected, or '') when stdin closes —
 * previously an EOF (Ctrl+D, piped input) left the promise pending forever.
 *
 * @param {string} [message] - Prompt label; a default is used when omitted.
 * @returns {Promise<string>} The trimmed single line, or the joined multiline body.
 */
export async function promptMultiline(message) {
    const label = message || 'Describe the workflow (use ``` for multiline): ';
    return new Promise((resolve) => {
        const rl = readline.createInterface({
            input: process.stdin,
            output: process.stdout,
            terminal: true,
        });
        let multilineMode = false;
        const lines = [];
        let settled = false;
        // Green "?" marker plus bold label, matching the previous ink-based UI.
        process.stdout.write(`\x1b[32m?\x1b[0m \x1b[1m${label}\x1b[0m`);
        const done = (value) => {
            if (settled)
                return;
            settled = true;
            rl.close();
            resolve(value);
        };
        rl.on('line', (line) => {
            if (!multilineMode) {
                if (line.trim() === '```') {
                    multilineMode = true;
                    process.stdout.write(`\x1b[36m Multiline mode — type \`\`\` on its own line to finish.\x1b[0m\n`);
                }
                else if (line.trim().length > 0) {
                    done(line.trim());
                }
                // empty line — stay open
            }
            else {
                if (line.trim() === '```') {
                    done(lines.join('\n'));
                }
                else {
                    lines.push(line);
                }
            }
        });
        // EOF / stream closed before submission: resolve instead of hanging.
        rl.on('close', () => done(multilineMode ? lines.join('\n') : ''));
    });
}