@n8n/ai-workflow-builder 0.28.0 → 0.29.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. package/dist/build.tsbuildinfo +1 -1
  2. package/dist/chains/prompt-categorization.d.ts +3 -0
  3. package/dist/chains/prompt-categorization.js +109 -0
  4. package/dist/chains/prompt-categorization.js.map +1 -0
  5. package/dist/chains/test/integration/test-helpers.d.ts +3 -0
  6. package/dist/chains/test/integration/test-helpers.js +16 -0
  7. package/dist/chains/test/integration/test-helpers.js.map +1 -0
  8. package/dist/tools/best-practices/chatbot.d.ts +7 -0
  9. package/dist/tools/best-practices/chatbot.js +118 -0
  10. package/dist/tools/best-practices/chatbot.js.map +1 -0
  11. package/dist/tools/best-practices/content-generation.d.ts +7 -0
  12. package/dist/tools/best-practices/content-generation.js +79 -0
  13. package/dist/tools/best-practices/content-generation.js.map +1 -0
  14. package/dist/tools/best-practices/data-extraction.d.ts +7 -0
  15. package/dist/tools/best-practices/data-extraction.js +105 -0
  16. package/dist/tools/best-practices/data-extraction.js.map +1 -0
  17. package/dist/tools/best-practices/form-input.d.ts +7 -0
  18. package/dist/tools/best-practices/form-input.js +173 -0
  19. package/dist/tools/best-practices/form-input.js.map +1 -0
  20. package/dist/tools/best-practices/index.d.ts +3 -0
  21. package/dist/tools/best-practices/index.js +27 -0
  22. package/dist/tools/best-practices/index.js.map +1 -0
  23. package/dist/tools/best-practices/scraping-and-research.d.ts +7 -0
  24. package/dist/tools/best-practices/scraping-and-research.js +147 -0
  25. package/dist/tools/best-practices/scraping-and-research.js.map +1 -0
  26. package/dist/tools/builder-tools.js +6 -0
  27. package/dist/tools/builder-tools.js.map +1 -1
  28. package/dist/tools/categorize-prompt.tool.d.ts +5 -0
  29. package/dist/tools/categorize-prompt.tool.js +84 -0
  30. package/dist/tools/categorize-prompt.tool.js.map +1 -0
  31. package/dist/tools/engines/node-search-engine.d.ts +0 -9
  32. package/dist/tools/engines/node-search-engine.js +52 -72
  33. package/dist/tools/engines/node-search-engine.js.map +1 -1
  34. package/dist/tools/get-best-practices.tool.d.ts +33 -0
  35. package/dist/tools/get-best-practices.tool.js +94 -0
  36. package/dist/tools/get-best-practices.tool.js.map +1 -0
  37. package/dist/tools/prompts/main-agent.prompt.js +23 -5
  38. package/dist/tools/prompts/main-agent.prompt.js.map +1 -1
  39. package/dist/tools/validate-workflow.tool.js +0 -16
  40. package/dist/tools/validate-workflow.tool.js.map +1 -1
  41. package/dist/types/best-practices.d.ts +6 -0
  42. package/dist/types/best-practices.js +3 -0
  43. package/dist/types/best-practices.js.map +1 -0
  44. package/dist/types/categorization.d.ts +23 -0
  45. package/dist/types/categorization.js +38 -0
  46. package/dist/types/categorization.js.map +1 -0
  47. package/dist/types/index.d.ts +2 -0
  48. package/dist/types/tools.d.ts +4 -0
  49. package/dist/utils/stream-processor.js +85 -58
  50. package/dist/utils/stream-processor.js.map +1 -1
  51. package/dist/validation/checks/agent-prompt.js +1 -1
  52. package/dist/validation/checks/agent-prompt.js.map +1 -1
  53. package/dist/validation/checks/connections.js +32 -1
  54. package/dist/validation/checks/connections.js.map +1 -1
  55. package/dist/workflow-state.d.ts +1 -1
  56. package/dist/workflow-state.js.map +1 -1
  57. package/package.json +10 -6
@@ -0,0 +1,3 @@
1
+ import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
2
+ import { type PromptCategorization } from '../types/categorization';
3
+ export declare function promptCategorizationChain(llm: BaseChatModel, userPrompt: string): Promise<PromptCategorization>;
@@ -0,0 +1,109 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.promptCategorizationChain = promptCategorizationChain;
4
+ const prompts_1 = require("@langchain/core/prompts");
5
+ const zod_1 = require("zod");
6
+ const categorization_1 = require("../types/categorization");
7
+ const examplePrompts = [
8
+ {
9
+ prompt: 'Monitor social channels for product mentions and auto-respond with campaign messages',
10
+ techniques: [
11
+ categorization_1.WorkflowTechnique.MONITORING,
12
+ categorization_1.WorkflowTechnique.CHATBOT,
13
+ categorization_1.WorkflowTechnique.CONTENT_GENERATION,
14
+ ],
15
+ },
16
+ {
17
+ prompt: 'Collect partner referral submissions and verify client instances via BigQuery',
18
+ techniques: [
19
+ categorization_1.WorkflowTechnique.FORM_INPUT,
20
+ categorization_1.WorkflowTechnique.HUMAN_IN_THE_LOOP,
21
+ categorization_1.WorkflowTechnique.NOTIFICATION,
22
+ ],
23
+ },
24
+ {
25
+ prompt: 'Scrape competitor pricing pages weekly and generate a summary report of changes',
26
+ techniques: [
27
+ categorization_1.WorkflowTechnique.SCHEDULING,
28
+ categorization_1.WorkflowTechnique.SCRAPING_AND_RESEARCH,
29
+ categorization_1.WorkflowTechnique.DATA_EXTRACTION,
30
+ categorization_1.WorkflowTechnique.DATA_ANALYSIS,
31
+ ],
32
+ },
33
+ {
34
+ prompt: 'Process uploaded PDF contracts to extract client details and update CRM records',
35
+ techniques: [
36
+ categorization_1.WorkflowTechnique.DOCUMENT_PROCESSING,
37
+ categorization_1.WorkflowTechnique.DATA_EXTRACTION,
38
+ categorization_1.WorkflowTechnique.DATA_TRANSFORMATION,
39
+ categorization_1.WorkflowTechnique.ENRICHMENT,
40
+ ],
41
+ },
42
+ {
43
+ prompt: 'Build a searchable internal knowledge base from past support tickets',
44
+ techniques: [
45
+ categorization_1.WorkflowTechnique.DATA_TRANSFORMATION,
46
+ categorization_1.WorkflowTechnique.DATA_ANALYSIS,
47
+ categorization_1.WorkflowTechnique.KNOWLEDGE_BASE,
48
+ ],
49
+ },
50
+ ];
51
+ function formatExamplePrompts() {
52
+ return examplePrompts
53
+ .map((example) => `- ${example.prompt} → ${example.techniques.join(',')}`)
54
+ .join('\n');
55
+ }
56
+ const promptCategorizationTemplate = prompts_1.PromptTemplate.fromTemplate(`Analyze the following user prompt and identify the workflow techniques required to fulfill the request.
57
+ Be specific and identify all relevant techniques.
58
+
59
+ <user_prompt>
60
+ {userPrompt}
61
+ </user_prompt>
62
+
63
+ <workflow_techniques>
64
+ {techniques}
65
+ </workflow_techniques>
66
+
67
+ The following prompt categorization examples show a prompt → techniques involved to provide a sense
68
+ of how the categorization should be carried out.
69
+ <example_categorization>
70
+ ${formatExamplePrompts()}
71
+ </example_categorization>
72
+
73
+ Select a maximum of 5 techniques that you believe are applicable, but only select them if you are
74
+ confident that they are applicable. If the prompt is ambiguous or does not describe an obvious workflow,
75
+ do not provide any techniques - if confidence is low avoid providing techniques.
76
+
77
+ Select ALL techniques that apply to this workflow. Most workflows use multiple techniques.
78
+ Rate your confidence in this categorization from 0.0 to 1.0.
79
+ `);
80
+ function formatTechniqueList() {
81
+ return Object.entries(categorization_1.TechniqueDescription)
82
+ .map(([key, description]) => `- **${key}**: ${description}`)
83
+ .join('\n');
84
+ }
85
+ async function promptCategorizationChain(llm, userPrompt) {
86
+ const categorizationSchema = zod_1.z.object({
87
+ techniques: zod_1.z
88
+ .array(zod_1.z.nativeEnum(categorization_1.WorkflowTechnique))
89
+ .min(0)
90
+ .max(5)
91
+ .describe('Zero to five workflow techniques identified in the prompt (maximum of 5)'),
92
+ confidence: zod_1.z
93
+ .number()
94
+ .min(0)
95
+ .max(1)
96
+ .describe('Confidence level in this categorization from 0.0 to 1.0'),
97
+ });
98
+ const modelWithStructure = llm.withStructuredOutput(categorizationSchema);
99
+ const prompt = await promptCategorizationTemplate.invoke({
100
+ userPrompt,
101
+ techniques: formatTechniqueList(),
102
+ });
103
+ const structuredOutput = await modelWithStructure.invoke(prompt);
104
+ return {
105
+ techniques: structuredOutput.techniques,
106
+ confidence: structuredOutput.confidence,
107
+ };
108
+ }
109
+ //# sourceMappingURL=prompt-categorization.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"prompt-categorization.js","sourceRoot":"","sources":["../../src/chains/prompt-categorization.ts"],"names":[],"mappings":";;AA8FA,8DA8BC;AA3HD,qDAAyD;AACzD,6BAAwB;AAExB,2DAIgC;AAEhC,MAAM,cAAc,GAAG;IACtB;QACC,MAAM,EAAE,sFAAsF;QAC9F,UAAU,EAAE;YACX,kCAAiB,CAAC,UAAU;YAC5B,kCAAiB,CAAC,OAAO;YACzB,kCAAiB,CAAC,kBAAkB;SACpC;KACD;IACD;QACC,MAAM,EAAE,+EAA+E;QACvF,UAAU,EAAE;YACX,kCAAiB,CAAC,UAAU;YAC5B,kCAAiB,CAAC,iBAAiB;YACnC,kCAAiB,CAAC,YAAY;SAC9B;KACD;IACD;QACC,MAAM,EAAE,iFAAiF;QACzF,UAAU,EAAE;YACX,kCAAiB,CAAC,UAAU;YAC5B,kCAAiB,CAAC,qBAAqB;YACvC,kCAAiB,CAAC,eAAe;YACjC,kCAAiB,CAAC,aAAa;SAC/B;KACD;IACD;QACC,MAAM,EAAE,iFAAiF;QACzF,UAAU,EAAE;YACX,kCAAiB,CAAC,mBAAmB;YACrC,kCAAiB,CAAC,eAAe;YACjC,kCAAiB,CAAC,mBAAmB;YACrC,kCAAiB,CAAC,UAAU;SAC5B;KACD;IACD;QACC,MAAM,EAAE,sEAAsE;QAC9E,UAAU,EAAE;YACX,kCAAiB,CAAC,mBAAmB;YACrC,kCAAiB,CAAC,aAAa;YAC/B,kCAAiB,CAAC,cAAc;SAChC;KACD;CACD,CAAC;AAEF,SAAS,oBAAoB;IAC5B,OAAO,cAAc;SACnB,GAAG,CAAC,CAAC,OAAO,EAAE,EAAE,CAAC,KAAK,OAAO,CAAC,MAAM,MAAM,OAAO,CAAC,UAAU,CAAC,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC;SACzE,IAAI,CAAC,IAAI,CAAC,CAAC;AACd,CAAC;AAED,MAAM,4BAA4B,GAAG,wBAAc,CAAC,YAAY,CAC/D;;;;;;;;;;;;;;EAcC,oBAAoB,EAAE;;;;;;;;;CASvB,CACA,CAAC;AAEF,SAAS,mBAAmB;IAC3B,OAAO,MAAM,CAAC,OAAO,CAAC,qCAAoB,CAAC;SACzC,GAAG,CAAC,CAAC,CAAC,GAAG,EAAE,WAAW,CAAC,EAAE,EAAE,CAAC,OAAO,GAAG,OAAO,WAAW,EAAE,CAAC;SAC3D,IAAI,CAAC,IAAI,CAAC,CAAC;AACd,CAAC;AAEM,KAAK,UAAU,yBAAyB,CAC9C,GAAkB,EAClB,UAAkB;IAElB,MAAM,oBAAoB,GAAG,OAAC,CAAC,MAAM,CAAC;QACrC,UAAU,EAAE,OAAC;aACX,KAAK,CAAC,OAAC,CAAC,UAAU,CAAC,kCAAiB,CAAC,CAAC;aACtC,GAAG,CAAC,CAAC,CAAC;aACN,GAAG,CAAC,CAAC,CAAC;aACN,QAAQ,CAAC,0EAA0E,CAAC;QACtF,UAAU,EAAE,OAAC;aACX,MAAM,EAAE;aACR,GAAG,CAAC,CAAC,CAAC;aACN,GAAG,CAAC,CAAC,CAAC;aACN,QAAQ,CAAC,yDAAyD,CAAC;KACrE,CAAC,CAAC;IAEH,MAAM,kBAAkB,GAAG,GAAG,CAAC,oBAAoB,CAAuB,oBAAoB,CAAC,CAAC;IAEhG,MAAM,MAAM,GAAG,MAAM,4BAA4B,CAAC,MAAM,CAAC;QACxD,UAAU;QACV,UAAU,EAAE,mBAAmB,EAAE;KACjC,CAAC,CAAC;IAEH,MAAM,gBAAgB,GAAG,MAAM,kBAAkB,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;IAEjE,OAAO;QACN,UAAU,EAAE,gBAAgB,CAAC,UAAU;QACvC,UAAU,EAAE,gBAAgB,CAAC,UAAU;KACvC,CAAC;AACH,CAAC"}
@@ -0,0 +1,3 @@
1
+ import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
2
+ export declare function setupIntegrationLLM(): Promise<BaseChatModel>;
3
+ export declare function shouldRunIntegrationTests(): boolean;
@@ -0,0 +1,16 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.setupIntegrationLLM = setupIntegrationLLM;
4
+ exports.shouldRunIntegrationTests = shouldRunIntegrationTests;
5
+ const llm_config_1 = require("../../../llm-config");
6
+ async function setupIntegrationLLM() {
7
+ const apiKey = process.env.N8N_AI_ANTHROPIC_KEY;
8
+ if (!apiKey) {
9
+ throw new Error('N8N_AI_ANTHROPIC_KEY environment variable is required for integration tests');
10
+ }
11
+ return await (0, llm_config_1.anthropicClaudeSonnet45)({ apiKey });
12
+ }
13
+ function shouldRunIntegrationTests() {
14
+ return process.env.ENABLE_INTEGRATION_TESTS === 'true';
15
+ }
16
+ //# sourceMappingURL=test-helpers.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"test-helpers.js","sourceRoot":"","sources":["../../../../src/chains/test/integration/test-helpers.ts"],"names":[],"mappings":";;AAWA,kDAMC;AAQD,8DAEC;AAzBD,6CAAuD;AAShD,KAAK,UAAU,mBAAmB;IACxC,MAAM,MAAM,GAAG,OAAO,CAAC,GAAG,CAAC,oBAAoB,CAAC;IAChD,IAAI,CAAC,MAAM,EAAE,CAAC;QACb,MAAM,IAAI,KAAK,CAAC,6EAA6E,CAAC,CAAC;IAChG,CAAC;IACD,OAAO,MAAM,IAAA,oCAAuB,EAAC,EAAE,MAAM,EAAE,CAAC,CAAC;AAClD,CAAC;AAQD,SAAgB,yBAAyB;IACxC,OAAO,OAAO,CAAC,GAAG,CAAC,wBAAwB,KAAK,MAAM,CAAC;AACxD,CAAC"}
@@ -0,0 +1,7 @@
1
+ import type { BestPracticesDocument } from '../../types/best-practices';
2
+ export declare class ChatbotBestPractices implements BestPracticesDocument {
3
+ readonly technique: "chatbot";
4
+ readonly version = "1.0.0";
5
+ private readonly documentation;
6
+ getDocumentation(): string;
7
+ }
@@ -0,0 +1,118 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.ChatbotBestPractices = void 0;
4
+ const categorization_1 = require("../../types/categorization");
5
+ class ChatbotBestPractices {
6
+ technique = categorization_1.WorkflowTechnique.CHATBOT;
7
+ version = '1.0.0';
8
+ documentation = `# Best Practices: Chatbot Workflows
9
+
10
+ ## Workflow Design
11
+
12
+ Break chatbot logic into manageable steps and use error handling nodes (IF, Switch) with fallback mechanisms to manage unexpected inputs.
13
+
14
+ Most chatbots run through external platforms like Slack, Telegram, or WhatsApp rather than through the n8n chat interface - if the user
15
+ requests a service like this, don't use the built-in chat interface nodes. If the user mentions chatting but does not mention a service,
16
+ then use the built-in n8n chat node.
17
+
18
+ CRITICAL: The user may ask to be able to chat to a workflow as well as trigger it via some other method, for example scheduling information
19
+ gathering but also being able to chat with the agent - in scenarios like this the two separate workflows MUST be connected through shared memory,
20
+ vector stores, data storage, or direct connections.
21
+
22
+ Example pattern:
23
+ - Schedule Trigger → News Gathering Agent → [memory node via ai_memory]
24
+ - Chat Trigger → Chatbot Agent → [SAME memory node via ai_memory]
25
+ - Result: Both agents share conversation/context history, enabling the chatbot to discuss gathered news
26
+
27
+ For the chatbot, always use the same chat node type for the trigger as for the response. If Telegram has been requested, trigger the chatbot via Telegram AND
28
+ respond via Telegram - do not mix chatbot interfaces.
29
+
30
+ ## Context & Memory Management
31
+
32
+ Always utilise memory in chatbot agent nodes - providing context gives you full conversation history and more control over context.
33
+ Memory nodes enable the bot to handle follow-up questions by maintaining short-term conversation history.
34
+
35
+ Include information with the user prompt such as timestamp, user ID, or session metadata. This enriches context without relying solely on memory and user prompt.
36
+
37
+ If there are other agents involved in the workflow you should share memory between the chatbot and those other agents where it makes sense.
38
+ Connect the same memory node to multiple agents to enable data sharing and context continuity.
39
+
40
+ ## Context Engineering & AI Agent Output
41
+
42
+ It can be beneficial to respond to the user as a tool of the chatbot agent rather than using the agent output - this allows the agent to loop/carry out multiple responses if necessary.
43
+ This requires adding a note to the agent's system prompt telling it to use the tool to respond to the user.
44
+
45
+ ## Message Attribution
46
+
47
+ n8n chatbots often attach the attribution "n8n workflow" to messages by default - you must disable this setting, which is
48
+ often called "Append n8n Attribution": for nodes that support it, add this setting and set it to false.
49
+
50
+ ## Recommended Nodes
51
+
52
+ ### Chat Trigger (@n8n/n8n-nodes-langchain.chatTrigger)
53
+
54
+ Purpose: Entry point for user messages in n8n-hosted chat interfaces
55
+
56
+ Pitfalls:
57
+
58
+ - Most production chatbots use external platforms (Slack, Telegram) rather than n8n's chat interface
59
+
60
+ ### AI Agent (@n8n/n8n-nodes-langchain.agent)
61
+
62
+ Purpose: Orchestrates logic, tool use, and LLM calls for intelligent responses
63
+
64
+ ### Chat Model Nodes
65
+
66
+ - OpenAI Chat Model (@n8n/n8n-nodes-langchain.lmChatOpenAi)
67
+ - Google Gemini Chat Model (@n8n/n8n-nodes-langchain.lmChatGoogleGemini)
68
+ - xAI Grok Chat Model (@n8n/n8n-nodes-langchain.lmChatXAiGrok)
69
+ - DeepSeek Chat Model (@n8n/n8n-nodes-langchain.lmChatDeepSeek)
70
+
71
+ Purpose: Connect to LLMs for natural, context-aware responses
72
+
73
+ ### Simple Memory (@n8n/n8n-nodes-langchain.memoryBufferWindow)
74
+
75
+ Purpose: Maintains short-term conversation history for context continuity
76
+
77
+ ### HTTP Request (n8n-nodes-base.httpRequest)
78
+
79
+ Purpose: Fetches external data to enrich chatbot responses with real-time or organizational information
80
+
81
+ ### Database Nodes & Google Sheets
82
+
83
+ - Postgres (n8n-nodes-base.postgres)
84
+ - MySQL (n8n-nodes-base.mySql)
85
+ - MongoDB (n8n-nodes-base.mongoDb)
86
+ - Google Sheets (n8n-nodes-base.googleSheets)
87
+
88
+ Purpose: Store conversation logs, retrieve structured data, or maintain user preferences
89
+
90
+ ### IF / Switch
91
+
92
+ - If (n8n-nodes-base.if)
93
+ - Switch (n8n-nodes-base.switch)
94
+
95
+ Purpose: Conditional logic and error handling for routing messages or managing conversation state
96
+
97
+ ### Integration Nodes
98
+
99
+ - Slack (n8n-nodes-base.slack)
100
+ - Telegram (n8n-nodes-base.telegram)
101
+ - WhatsApp Business Cloud (n8n-nodes-base.whatsApp)
102
+ - Discord (n8n-nodes-base.discord)
103
+
104
+ Purpose: Multi-channel support for deploying chatbots on popular messaging platforms
105
+
106
+ ## Common Pitfalls to Avoid
107
+
108
+ ### Leaving Chatbot Disconnected
109
+
110
+ When a workflow has multiple triggers (e.g., scheduled data collection + chatbot interaction), the chatbot MUST have access to the data
111
+ generated by the workflow. Connect the chatbot through shared memory, vector stores, data storage, or direct data flow connections.
112
+ `;
113
+ getDocumentation() {
114
+ return this.documentation;
115
+ }
116
+ }
117
+ exports.ChatbotBestPractices = ChatbotBestPractices;
118
+ //# sourceMappingURL=chatbot.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"chatbot.js","sourceRoot":"","sources":["../../../src/tools/best-practices/chatbot.ts"],"names":[],"mappings":";;;AACA,2DAA2D;AAE3D,MAAa,oBAAoB;IACvB,SAAS,GAAG,kCAAiB,CAAC,OAAO,CAAC;IACtC,OAAO,GAAG,OAAO,CAAC;IAEV,aAAa,GAAG;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAwGjC,CAAC;IAED,gBAAgB;QACf,OAAO,IAAI,CAAC,aAAa,CAAC;IAC3B,CAAC;CACD;AAjHD,oDAiHC"}
@@ -0,0 +1,7 @@
1
+ import type { BestPracticesDocument } from '../../types/best-practices';
2
+ export declare class ContentGenerationBestPractices implements BestPracticesDocument {
3
+ readonly technique: "content_generation";
4
+ readonly version = "1.0.0";
5
+ private readonly documentation;
6
+ getDocumentation(): string;
7
+ }
@@ -0,0 +1,79 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.ContentGenerationBestPractices = void 0;
4
+ const categorization_1 = require("../../types/categorization");
5
+ class ContentGenerationBestPractices {
6
+ technique = categorization_1.WorkflowTechnique.CONTENT_GENERATION;
7
+ version = '1.0.0';
8
+ documentation = `# Best Practices: Content Generation Workflows
9
+
10
+ ## Workflow Design
11
+
12
+ Break complex tasks into sequential steps (e.g., generate text, create image, compose video) for modularity and easier troubleshooting.
13
+
14
+ ## Content-Specific Guidance
15
+
16
+ For text generation, validate and sanitize input/output to avoid malformed data. When generating images, prefer binary data over URLs for uploads to avoid media type errors.
17
+
18
+ ## Recommended Nodes
19
+
20
+ ### OpenAI (@n8n/n8n-nodes-langchain.openAi)
21
+
22
+ Purpose: GPT-based text generation, DALL-E image generation, text-to-speech (TTS), and audio transcription
23
+
24
+ ### xAI Grok Chat Model (@n8n/n8n-nodes-langchain.lmChatXAiGrok)
25
+
26
+ Purpose: Conversational AI and text generation
27
+
28
+ ### Google Gemini Chat Model (@n8n/n8n-nodes-langchain.lmChatGoogleGemini)
29
+
30
+ Purpose: Image analysis and generation, video generation from text prompts, multimodal content creation
31
+
32
+ ### ElevenLabs
33
+
34
+ Purpose: Natural-sounding AI voice generation
35
+
36
+ Note: Use HTTP Request node or a community node to integrate with ElevenLabs API
37
+
38
+ ### HTTP Request (n8n-nodes-base.httpRequest)
39
+
40
+ Purpose: Integrating with other LLM and content generation APIs (e.g., Jasper, Writesonic, Anthropic, HuggingFace)
41
+
42
+ ### Edit Image (n8n-nodes-base.editImage)
43
+
44
+ Purpose: Manipulating images - resize, crop, rotate, and format conversion
45
+
46
+ Pitfalls:
47
+
48
+ - Ensure input is valid binary image data
49
+ - Check output format compatibility with downstream nodes
50
+
51
+ ### Markdown (n8n-nodes-base.markdown)
52
+
53
+ Purpose: Formatting and converting text to HTML or Markdown reports
54
+
55
+ ### Facebook Graph API (n8n-nodes-base.facebookGraphApi)
56
+
57
+ Purpose: Uploading videos and images to Instagram and Facebook
58
+
59
+ Pitfalls:
60
+
61
+ - Use binary data fields rather than URLs for media uploads to prevent "media type" errors
62
+ - Verify page IDs and access tokens have correct permissions
63
+
64
+ ### Wait (n8n-nodes-base.wait)
65
+
66
+ Purpose: Handling delays in video processing/uploading and respecting API rate limits
67
+
68
+ ## Common Pitfalls to Avoid
69
+
70
+ Binary Data Handling: For media uploads, use binary fields rather than URLs to prevent "media type" errors, especially with Facebook and Instagram APIs. Download media to binary data first, then upload from binary rather than passing URLs.
71
+
72
+ Async Processing: For long-running content generation tasks (especially video), implement proper wait/polling mechanisms. Don't assume instant completion - many AI services process requests asynchronously.
73
+ `;
74
+ getDocumentation() {
75
+ return this.documentation;
76
+ }
77
+ }
78
+ exports.ContentGenerationBestPractices = ContentGenerationBestPractices;
79
+ //# sourceMappingURL=content-generation.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"content-generation.js","sourceRoot":"","sources":["../../../src/tools/best-practices/content-generation.ts"],"names":[],"mappings":";;;AACA,2DAA2D;AAE3D,MAAa,8BAA8B;IACjC,SAAS,GAAG,kCAAiB,CAAC,kBAAkB,CAAC;IACjD,OAAO,GAAG,OAAO,CAAC;IAEV,aAAa,GAAG;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAiEjC,CAAC;IAED,gBAAgB;QACf,OAAO,IAAI,CAAC,aAAa,CAAC;IAC3B,CAAC;CACD;AA1ED,wEA0EC"}
@@ -0,0 +1,7 @@
1
+ import type { BestPracticesDocument } from '../../types/best-practices';
2
+ export declare class DataExtractionBestPractices implements BestPracticesDocument {
3
+ readonly technique: "data_extraction";
4
+ readonly version = "1.0.0";
5
+ private readonly documentation;
6
+ getDocumentation(): string;
7
+ }
@@ -0,0 +1,105 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.DataExtractionBestPractices = void 0;
4
+ const categorization_1 = require("../../types/categorization");
5
+ class DataExtractionBestPractices {
6
+ technique = categorization_1.WorkflowTechnique.DATA_EXTRACTION;
7
+ version = '1.0.0';
8
+ documentation = `# Best Practices: Data Extraction Workflows
9
+
10
+ ## Node Selection by Data Type
11
+
12
+ Choose the right node for your data source. Use Extract From File for CSV, Excel, PDF, and text files to convert binary data to JSON for further processing.
13
+
14
+ Use Information Extractor or AI nodes for extracting structured data from unstructured text such as PDFs or emails using LLMs.
15
+
16
+ For binary data, ensure you use nodes like Extract From File to handle files properly.
17
+
18
+ ## Data Structure & Type Management
19
+
20
+ Normalize data structure early in your workflow. Use transformation nodes like Split Out, Aggregate, or Set to ensure your data matches n8n's expected structure: an array of objects with a json key.
21
+ Not transforming incoming data to n8n's expected format causes downstream node failures.
22
+
23
+ When working with large amounts of information, n8n's display can be hard to view. Use the Edit Fields node to help organize and view data more clearly during development and debugging.
24
+
25
+ ## Large File Handling
26
+
27
+ Process files in batches or use sub-workflows to avoid memory issues. For large binary files, consider enabling filesystem mode (N8N_DEFAULT_BINARY_DATA_MODE=filesystem) if self-hosted, to store binary data on disk instead of memory.
28
+
29
+ Processing too many items or large files at once can crash your instance. Always batch or split processing for large datasets to manage memory effectively.
30
+
31
+ ## Binary Data Management
32
+
33
+ Binary data can be lost if intermediate nodes (like Set or Code) do not have "Include Other Input Fields" enabled, especially in sub-workflows. Always verify binary data is preserved through your workflow pipeline.
34
+
35
+ ## AI-Powered Extraction
36
+
37
+ Leverage AI for unstructured data using nodes like Information Extractor or Summarization Chain to extract structured data from unstructured sources such as PDFs, emails, or web pages.
38
+
39
+ ## Recommended Nodes
40
+
41
+ ### Extract From File (n8n-nodes-base.extractFromFile)
42
+
43
+ Purpose: Converts binary data from CSV, Excel, PDF, and text files to JSON for processing
44
+
45
+ Pitfalls:
46
+
47
+ - Ensure the correct binary field name is specified in the node configuration
48
+ - Verify file format compatibility before extraction
49
+
50
+ ### HTML Extract (n8n-nodes-base.htmlExtract)
51
+
52
+ Purpose: Scrapes data from web pages using CSS selectors
53
+
54
+ ### Split Out (n8n-nodes-base.splitOut)
55
+
56
+ Purpose: Processes arrays of items individually for sequential operations
57
+
58
+ ### Edit Fields (Set) (n8n-nodes-base.set)
59
+
60
+ Purpose: Data transformation and mapping to normalize structure
61
+
62
+ Pitfalls:
63
+
64
+ - Enable "Include Other Input Fields" to preserve binary data
65
+ - Pay attention to data types - mixing types causes unexpected failures
66
+
67
+ ### Information Extractor (@n8n/n8n-nodes-langchain.informationExtractor)
68
+
69
+ Purpose: AI-powered extraction of structured data from unstructured text
70
+
71
+ Pitfalls:
72
+
73
+ - Requires proper schema definition for extraction
74
+
75
+ ### Summarization Chain (@n8n/n8n-nodes-langchain.chainSummarization)
76
+
77
+ Purpose: Summarizes large text blocks using AI for condensed information extraction
78
+
79
+ Pitfalls:
80
+
81
+ - Context window limits may truncate very long documents
82
+ - Verify summary quality matches requirements
83
+
84
+ ### HTTP Request (n8n-nodes-base.httpRequest)
85
+
86
+ Purpose: Fetches data from APIs or web pages for extraction
87
+
88
+ ### Code (n8n-nodes-base.code)
89
+
90
+ Purpose: Custom logic for complex data transformations
91
+
92
+ ## Common Pitfalls to Avoid
93
+
94
+ Data Type Confusion: It is easy to mix up data types - n8n can be very lenient, but inconsistent types lead to problems. Pay close attention to what type you are getting and ensure consistency throughout the workflow.
95
+
96
+ Binary Data Loss: Binary data can be lost if intermediate nodes (Set, Code) do not have "Include Other Input Fields" enabled, especially in sub-workflows. Always verify binary data preservation.
97
+
98
+ Large Data Display Issues: Large amounts of information can be hard to view in n8n during development. Use the Edit Fields node to help organize and view data more clearly.
99
+ `;
100
+ getDocumentation() {
101
+ return this.documentation;
102
+ }
103
+ }
104
+ exports.DataExtractionBestPractices = DataExtractionBestPractices;
105
+ //# sourceMappingURL=data-extraction.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"data-extraction.js","sourceRoot":"","sources":["../../../src/tools/best-practices/data-extraction.ts"],"names":[],"mappings":";;;AACA,2DAA2D;AAE3D,MAAa,2BAA2B;IAC9B,SAAS,GAAG,kCAAiB,CAAC,eAAe,CAAC;IAC9C,OAAO,GAAG,OAAO,CAAC;IAEV,aAAa,GAAG;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA2FjC,CAAC;IAED,gBAAgB;QACf,OAAO,IAAI,CAAC,aAAa,CAAC;IAC3B,CAAC;CACD;AApGD,kEAoGC"}
@@ -0,0 +1,7 @@
1
+ import type { BestPracticesDocument } from '../../types/best-practices';
2
+ export declare class FormInputBestPractices implements BestPracticesDocument {
3
+ readonly technique: "form_input";
4
+ readonly version = "1.0.0";
5
+ private readonly documentation;
6
+ getDocumentation(): string;
7
+ }
@@ -0,0 +1,173 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.FormInputBestPractices = void 0;
4
+ const categorization_1 = require("../../types/categorization");
5
+ class FormInputBestPractices {
6
+ technique = categorization_1.WorkflowTechnique.FORM_INPUT;
7
+ version = '1.0.0';
8
+ documentation = `# Best Practices: Form Input Workflows
9
+
10
+ ## Workflow Design
11
+
12
+ ### Critical: Always Store Raw Form Data
13
+
14
+ ALWAYS store raw form responses to a persistent data storage destination even if the primary purpose of the workflow is
15
+ to trigger another action (like sending to an API or triggering a notification). This allows users to monitor
16
+ form responses as part of the administration of their workflow.
17
+
18
+ Required storage destinations include:
19
+ - Google Sheets node
20
+ - Airtable node
21
+ - n8n Data Tables
22
+ - PostgreSQL/MySQL/MongoDB nodes
23
+ - Any other database or spreadsheet service
24
+
25
+ IMPORTANT: Simply using Set or Merge nodes is NOT sufficient. These nodes only transform data in memory - they do not
26
+ persist data. You must use an actual storage node (like Google Sheets, Airtable, or Data Tables) to write the data.
27
+
28
+ Storage Requirements:
29
+ - Store the un-edited user input immediately after the form steps are complete
30
+ - Do not store only a summary or edited version of the user's inputs - store the raw data
31
+ - For single-step forms: store immediately after the form trigger
32
+ - For multi-step forms: store immediately after aggregating all steps with Set/Merge nodes
33
+ - The storage node should appear in the workflow right after data collection/aggregation
34
+
35
+ ## Message Attribution
36
+
37
+ n8n forms attach the attribution "n8n workflow" to messages by default - you must disable this setting, which is
38
+ often called "Append n8n Attribution": for the n8n form nodes, add this setting and set it to false.
39
+
40
+ ## Multi-Step Forms
41
+
42
+ Build multi-step forms by chaining multiple Form nodes together. Each Form node represents a page or step in your form
43
+ sequence. Use the n8n Form Trigger node to start the workflow and display the first form page to the user.
44
+
45
+ ## Data Collection & Aggregation
46
+
47
+ Collect and merge all user responses from each form step before writing to your destination (e.g., Google Sheets). Use
48
+ Set or Merge nodes to combine data as needed. Make sure your JSON keys match the column names in your destination for
49
+ automatic mapping.
50
+
51
+ ## Conditional Logic & Branching
52
+
53
+ Use IF or Switch nodes to direct users to different form pages based on their previous answers. This enables dynamic
54
+ form flows where the path changes based on user input, creating personalized form experiences.
55
+
56
+ ## Dynamic Form Fields
57
+
58
+ For forms that require dynamic options (e.g., dropdowns populated from an API or previous step), generate the form
59
+ definition in a Code node and pass it to the Form node as JSON. You can define forms using JSON for dynamic or
60
+ conditional fields, and even generate form fields dynamically using a Code node if needed.
61
+
62
+ ## Input Validation
63
+
64
+ Validate user input between steps to ensure data quality. If input is invalid, loop back to the relevant form step with
65
+ an error message to guide the user to correct their submission. This prevents bad data from entering your system.
66
+
67
+ ## Recommended Nodes
68
+
69
+ ### n8n Form Trigger (n8n-nodes-base.formTrigger)
70
+
71
+ Purpose: Starts the workflow and displays the first form page to the user
72
+
73
+ Pitfalls:
74
+
75
+ - Use the Production URL for live forms; the Test URL is for development and debugging only
76
+ - Ensure the form trigger is properly configured before sharing URLs with users
77
+
78
+ ### n8n Form (n8n-nodes-base.form)
79
+
80
+ Purpose: Displays form pages in multi-step form sequences
81
+
82
+ Pitfalls:
83
+
84
+ - Each Form node represents one page/step in your form
85
+ - You can define forms using JSON for dynamic or conditional fields
86
+ - Generate form fields dynamically using a Code node if needed for complex scenarios
87
+
88
+ ### Storage Nodes
89
+
90
+ Purpose: Persist raw form data to a storage destination, preference should be for built-in n8n tables
91
+ but use the most applicable node depending on the user's request.
92
+
93
+ Required nodes (use at least one):
94
+ - Data table (n8n-nodes-base.dataTable): Built-in n8n storage for quick setup
95
+ - Google Sheets (n8n-nodes-base.googleSheets): Best for simple spreadsheet storage
96
+ - Airtable (n8n-nodes-base.airtable): Best for structured database with relationships
97
+ - Postgres (n8n-nodes-base.postgres) / MySQL (n8n-nodes-base.mySql) / MongoDB (n8n-nodes-base.mongoDb): For production database storage
98
+
99
+ Pitfalls:
100
+
101
+ - Every form workflow MUST include a storage node that actually writes data to a destination
102
+ - Set and Merge nodes alone are NOT sufficient - they only transform data in memory
103
+ - The storage node should be placed immediately after the form trigger (single-step) or after data aggregation (multi-step)
104
+
105
+ ### Code (n8n-nodes-base.code)
106
+
107
+ Purpose: Processes form data, generates dynamic form definitions, or implements custom validation logic
108
+
109
+ ### Edit Fields (Set) (n8n-nodes-base.set)
110
+
111
+ Purpose: Aggregates and transforms form data between steps (NOT for storage - use a storage node)
112
+
113
+ ### Merge (n8n-nodes-base.merge)
114
+
115
+ Purpose: Combines data from multiple form steps into a single dataset (NOT for storage - use a storage node)
116
+
117
+ Pitfalls:
118
+
119
+ - Ensure data from all form steps is properly merged before writing to destination
120
+ - Use appropriate merge modes (append, merge by key, etc.) for your use case
121
+ - Remember: Merge prepares data but does not store it - add a storage node after Merge
122
+
123
+ ### If (n8n-nodes-base.if)
124
+
125
+ Purpose: Routes users to different form pages based on their previous answers
126
+
127
+ ### Switch (n8n-nodes-base.switch)
128
+
129
+ Purpose: Implements multi-path conditional routing in complex forms
130
+
131
+ Pitfalls:
132
+
133
+ - Include a default case to handle unexpected input values
134
+ - Keep routing logic clear and maintainable
135
+
136
+ ## Common Pitfalls to Avoid
137
+
138
+ ### Missing Raw Form Response Storage
139
+
140
+ When building n8n forms it is recommended to always store the raw form response to some form of data storage (Google Sheets, Airtable, etc.)
141
+ for administration later. It is CRITICAL that if you create an n8n form node you store the raw output with a storage node.
142
+
143
+ ### Data Loss in Multi-Step Forms
144
+
145
+ Aggregate all form step data using Set/Merge nodes before writing to your destination. Failing to merge data from multiple steps
146
+ can result in incomplete form submissions being stored. After merging, ensure you write the complete dataset to a storage node.
147
+
148
+ ### Poor User Experience
149
+
150
+ Use the Form Ending page type to show a completion message or redirect users after submission.
151
+ Without a proper ending, users may be confused about whether their submission was successful.
152
+
153
+ ### Invalid Data
154
+
155
+ Implement validation between form steps to catch errors early. Without validation, invalid data can
156
+ propagate through your workflow and corrupt your destination data.
157
+
158
+ ### Complex Field Generation
159
+
160
+ When generating dynamic form fields, ensure the JSON structure exactly matches what the Form
161
+ node expects. Test thoroughly with the Test URL before going live.
162
+
163
+ ### Mapping Errors
164
+
165
+ When writing to Google Sheets or other destinations, ensure field names match exactly. Mismatched names
166
+ will cause data to be written to wrong columns or fail entirely.
167
+ `;
168
+ getDocumentation() {
169
+ return this.documentation;
170
+ }
171
+ }
172
+ exports.FormInputBestPractices = FormInputBestPractices;
173
+ //# sourceMappingURL=form-input.js.map
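The diff also adds `package/dist/tools/best-practices/index.js` (file 21 above), whose contents are not shown here. Given the document classes above, a plausible registry shape might look like the following sketch; the exported names, including `ScrapingAndResearchBestPractices` and `getBestPractices`, are assumptions rather than the package's actual API:

```typescript
// Sketch only: a technique → best-practices registry in the spirit of the (unshown)
// best-practices/index.js. Exported names here are assumptions, not the actual API.
import { WorkflowTechnique } from '../../types/categorization';
import type { BestPracticesDocument } from '../../types/best-practices';
import { ChatbotBestPractices } from './chatbot';
import { ContentGenerationBestPractices } from './content-generation';
import { DataExtractionBestPractices } from './data-extraction';
import { FormInputBestPractices } from './form-input';
import { ScrapingAndResearchBestPractices } from './scraping-and-research';

const registry = new Map<WorkflowTechnique, BestPracticesDocument>([
	[WorkflowTechnique.CHATBOT, new ChatbotBestPractices()],
	[WorkflowTechnique.CONTENT_GENERATION, new ContentGenerationBestPractices()],
	[WorkflowTechnique.DATA_EXTRACTION, new DataExtractionBestPractices()],
	[WorkflowTechnique.FORM_INPUT, new FormInputBestPractices()],
	[WorkflowTechnique.SCRAPING_AND_RESEARCH, new ScrapingAndResearchBestPractices()],
]);

// Return the guidance documents for whichever techniques a prompt was categorized into.
export function getBestPractices(techniques: WorkflowTechnique[]): string[] {
	return techniques
		.map((technique) => registry.get(technique)?.getDocumentation())
		.filter((doc): doc is string => doc !== undefined);
}
```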