@n8n/ai-workflow-builder 0.28.0 → 0.30.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/build.tsbuildinfo +1 -1
- package/dist/chains/prompt-categorization.d.ts +3 -0
- package/dist/chains/prompt-categorization.js +109 -0
- package/dist/chains/prompt-categorization.js.map +1 -0
- package/dist/chains/test/integration/test-helpers.d.ts +3 -0
- package/dist/chains/test/integration/test-helpers.js +16 -0
- package/dist/chains/test/integration/test-helpers.js.map +1 -0
- package/dist/constants.d.ts +1 -1
- package/dist/constants.js +1 -1
- package/dist/llm-config.js +13 -0
- package/dist/llm-config.js.map +1 -1
- package/dist/tools/add-node.tool.d.ts +29 -0
- package/dist/tools/add-node.tool.js +26 -8
- package/dist/tools/add-node.tool.js.map +1 -1
- package/dist/tools/best-practices/chatbot.d.ts +7 -0
- package/dist/tools/best-practices/chatbot.js +118 -0
- package/dist/tools/best-practices/chatbot.js.map +1 -0
- package/dist/tools/best-practices/content-generation.d.ts +7 -0
- package/dist/tools/best-practices/content-generation.js +79 -0
- package/dist/tools/best-practices/content-generation.js.map +1 -0
- package/dist/tools/best-practices/data-extraction.d.ts +7 -0
- package/dist/tools/best-practices/data-extraction.js +105 -0
- package/dist/tools/best-practices/data-extraction.js.map +1 -0
- package/dist/tools/best-practices/form-input.d.ts +7 -0
- package/dist/tools/best-practices/form-input.js +173 -0
- package/dist/tools/best-practices/form-input.js.map +1 -0
- package/dist/tools/best-practices/index.d.ts +3 -0
- package/dist/tools/best-practices/index.js +27 -0
- package/dist/tools/best-practices/index.js.map +1 -0
- package/dist/tools/best-practices/scraping-and-research.d.ts +7 -0
- package/dist/tools/best-practices/scraping-and-research.js +147 -0
- package/dist/tools/best-practices/scraping-and-research.js.map +1 -0
- package/dist/tools/builder-tools.js +6 -0
- package/dist/tools/builder-tools.js.map +1 -1
- package/dist/tools/categorize-prompt.tool.d.ts +5 -0
- package/dist/tools/categorize-prompt.tool.js +84 -0
- package/dist/tools/categorize-prompt.tool.js.map +1 -0
- package/dist/tools/engines/node-search-engine.d.ts +0 -9
- package/dist/tools/engines/node-search-engine.js +74 -73
- package/dist/tools/engines/node-search-engine.js.map +1 -1
- package/dist/tools/get-best-practices.tool.d.ts +33 -0
- package/dist/tools/get-best-practices.tool.js +94 -0
- package/dist/tools/get-best-practices.tool.js.map +1 -0
- package/dist/tools/helpers/validation.d.ts +1 -1
- package/dist/tools/helpers/validation.js +3 -2
- package/dist/tools/helpers/validation.js.map +1 -1
- package/dist/tools/node-details.tool.d.ts +4 -0
- package/dist/tools/node-details.tool.js +3 -2
- package/dist/tools/node-details.tool.js.map +1 -1
- package/dist/tools/prompts/main-agent.prompt.js +23 -5
- package/dist/tools/prompts/main-agent.prompt.js.map +1 -1
- package/dist/tools/update-node-parameters.tool.js +1 -1
- package/dist/tools/update-node-parameters.tool.js.map +1 -1
- package/dist/tools/utils/node-creation.utils.d.ts +2 -2
- package/dist/tools/utils/node-creation.utils.js +7 -3
- package/dist/tools/utils/node-creation.utils.js.map +1 -1
- package/dist/tools/validate-workflow.tool.js +0 -16
- package/dist/tools/validate-workflow.tool.js.map +1 -1
- package/dist/types/best-practices.d.ts +6 -0
- package/dist/types/best-practices.js +3 -0
- package/dist/types/best-practices.js.map +1 -0
- package/dist/types/categorization.d.ts +23 -0
- package/dist/types/categorization.js +38 -0
- package/dist/types/categorization.js.map +1 -0
- package/dist/types/index.d.ts +2 -0
- package/dist/types/nodes.d.ts +1 -0
- package/dist/types/tools.d.ts +4 -0
- package/dist/utils/http-proxy-agent.d.ts +2 -0
- package/dist/utils/http-proxy-agent.js +16 -0
- package/dist/utils/http-proxy-agent.js.map +1 -0
- package/dist/utils/stream-processor.js +85 -58
- package/dist/utils/stream-processor.js.map +1 -1
- package/dist/validation/checks/agent-prompt.js +1 -1
- package/dist/validation/checks/agent-prompt.js.map +1 -1
- package/dist/validation/checks/connections.js +32 -1
- package/dist/validation/checks/connections.js.map +1 -1
- package/dist/workflow-builder-agent.js +30 -22
- package/dist/workflow-builder-agent.js.map +1 -1
- package/dist/workflow-state.d.ts +1 -1
- package/dist/workflow-state.js.map +1 -1
- package/package.json +10 -6

package/dist/tools/best-practices/data-extraction.js
@@ -0,0 +1,105 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.DataExtractionBestPractices = void 0;
+const categorization_1 = require("../../types/categorization");
+class DataExtractionBestPractices {
+    technique = categorization_1.WorkflowTechnique.DATA_EXTRACTION;
+    version = '1.0.0';
+    documentation = `# Best Practices: Data Extraction Workflows
+
+## Node Selection by Data Type
+
+Choose the right node for your data source. Use Extract From File for CSV, Excel, PDF, and text files to convert binary data to JSON for further processing.
+
+Use Information Extractor or AI nodes for extracting structured data from unstructured text such as PDFs or emails using LLMs.
+
+For binary data, ensure you use nodes like Extract From File to handle files properly.
+
+## Data Structure & Type Management
+
+Normalize data structure early in your workflow. Use transformation nodes like Split Out, Aggregate, or Set to ensure your data matches n8n's expected structure: an array of objects with a json key.
+Not transforming incoming data to n8n's expected format causes downstream node failures.
+
+When working with large amounts of information, n8n's display can be hard to view. Use the Edit Fields node to help organize and view data more clearly during development and debugging.
+
+## Large File Handling
+
+Process files in batches or use sub-workflows to avoid memory issues. For large binary files, consider enabling filesystem mode (N8N_DEFAULT_BINARY_DATA_MODE=filesystem) if self-hosted, to store binary data on disk instead of memory.
+
+Processing too many items or large files at once can crash your instance. Always batch or split processing for large datasets to manage memory effectively.
+
+## Binary Data Management
+
+Binary data can be lost if intermediate nodes (like Set or Code) do not have "Include Other Input Fields" enabled, especially in sub-workflows. Always verify binary data is preserved through your workflow pipeline.
+
+## AI-Powered Extraction
+
+Leverage AI for unstructured data using nodes like Information Extractor or Summarization Chain to extract structured data from unstructured sources such as PDFs, emails, or web pages.
+
+## Recommended Nodes
+
+### Extract From File (n8n-nodes-base.extractFromFile)
+
+Purpose: Converts binary data from CSV, Excel, PDF, and text files to JSON for processing
+
+Pitfalls:
+
+- Ensure the correct binary field name is specified in the node configuration
+- Verify file format compatibility before extraction
+
+### HTML Extract (n8n-nodes-base.htmlExtract)
+
+Purpose: Scrapes data from web pages using CSS selectors
+
+### Split Out (n8n-nodes-base.splitOut)
+
+Purpose: Processes arrays of items individually for sequential operations
+
+### Edit Fields (Set) (n8n-nodes-base.set)
+
+Purpose: Data transformation and mapping to normalize structure
+
+Pitfalls:
+
+- Enable "Include Other Input Fields" to preserve binary data
+- Pay attention to data types - mixing types causes unexpected failures
+
+### Information Extractor (@n8n/n8n-nodes-langchain.informationExtractor)
+
+Purpose: AI-powered extraction of structured data from unstructured text
+
+Pitfalls:
+
+- Requires proper schema definition for extraction
+
+### Summarization Chain (@n8n/n8n-nodes-langchain.chainSummarization)
+
+Purpose: Summarizes large text blocks using AI for condensed information extraction
+
+Pitfalls:
+
+- Context window limits may truncate very long documents
+- Verify summary quality matches requirements
+
+### HTTP Request (n8n-nodes-base.httpRequest)
+
+Purpose: Fetches data from APIs or web pages for extraction
+
+### Code (n8n-nodes-base.code)
+
+Purpose: Custom logic for complex data transformations
+
+## Common Pitfalls to Avoid
+
+Data Type Confusion: People often mix up data types - n8n can be very lenient but it can lead to problems. Pay close attention to what type you are getting and ensure consistency throughout the workflow.
+
+Binary Data Loss: Binary data can be lost if intermediate nodes (Set, Code) do not have "Include Other Input Fields" enabled, especially in sub-workflows. Always verify binary data preservation.
+
+Large Data Display Issues: n8n displaying large amounts of information can be hard to view during development. Use the Edit Fields node to help organize and view data more clearly.
+`;
+    getDocumentation() {
+        return this.documentation;
+    }
+}
+exports.DataExtractionBestPractices = DataExtractionBestPractices;
+//# sourceMappingURL=data-extraction.js.map
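
The data-extraction guidance above leans on n8n's item convention: every node passes along an array of objects, each wrapped in a json key. A minimal Code-node sketch of that normalization step, assuming the previous node exposed the raw array on a records field and that the output columns (id, name) match your data - both are illustrative, not part of the package:

// n8n Code node (Run Once for All Items) - illustrative sketch only.
// Assumes the previous node returned a single item whose json.records field
// holds a raw array; adjust the field name to whatever your source provides.
const raw = $input.first().json.records ?? [];

// n8n expects an array of items, each shaped as { json: <plain object> }.
return raw.map((record) => ({
  json: {
    id: record.id,
    name: record.name,
    extractedAt: new Date().toISOString(),
  },
}));

Doing this right after Extract From File (or the HTTP Request) keeps every downstream node working against a predictable structure, which is exactly the failure mode the document warns about.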

package/dist/tools/best-practices/data-extraction.js.map
@@ -0,0 +1 @@
{"version":3,"file":"data-extraction.js","sourceRoot":"","sources":["../../../src/tools/best-practices/data-extraction.ts"],"names":[],"mappings":";;;AACA,2DAA2D;AAE3D,MAAa,2BAA2B;IAC9B,SAAS,GAAG,kCAAiB,CAAC,eAAe,CAAC;IAC9C,OAAO,GAAG,OAAO,CAAC;IAEV,aAAa,GAAG;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA2FjC,CAAC;IAED,gBAAgB;QACf,OAAO,IAAI,CAAC,aAAa,CAAC;IAC3B,CAAC;CACD;AApGD,kEAoGC"}

package/dist/tools/best-practices/form-input.d.ts
@@ -0,0 +1,7 @@
+import type { BestPracticesDocument } from '../../types/best-practices';
+export declare class FormInputBestPractices implements BestPracticesDocument {
+    readonly technique: "form_input";
+    readonly version = "1.0.0";
+    private readonly documentation;
+    getDocumentation(): string;
+}

package/dist/tools/best-practices/form-input.js
@@ -0,0 +1,173 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.FormInputBestPractices = void 0;
+const categorization_1 = require("../../types/categorization");
+class FormInputBestPractices {
+    technique = categorization_1.WorkflowTechnique.FORM_INPUT;
+    version = '1.0.0';
+    documentation = `# Best Practices: Form Input Workflows
+
+## Workflow Design
+
+### Critical: Always Store Raw Form Data
+
+ALWAYS store raw form responses to a persistent data storage destination even if the primary purpose of the workflow is
+to trigger another action (like sending to an API or triggering a notification). This allows users to monitor
+form responses as part of the administration of their workflow.
+
+Required storage destinations include:
+- Google Sheets node
+- Airtable node
+- n8n Data Tables
+- PostgreSQL/MySQL/MongoDB nodes
+- Any other database or spreadsheet service
+
+IMPORTANT: Simply using Set or Merge nodes is NOT sufficient. These nodes only transform data in memory - they do not
+persist data. You must use an actual storage node (like Google Sheets, Airtable, or Data Tables) to write the data.
+
+Storage Requirements:
+- Store the un-edited user input immediately after the form steps are complete
+- Do not store only a summary or edited version of the user's inputs - store the raw data
+- For single-step forms: store immediately after the form trigger
+- For multi-step forms: store immediately after aggregating all steps with Set/Merge nodes
+- The storage node should appear in the workflow right after data collection/aggregation
+
+## Message Attribution
+
+n8n forms attach the attribution "n8n workflow" to messages by default - you must disable this setting, which is
+often called "Append n8n Attribution" on the n8n form nodes: add this setting and set it to false.
+
+## Multi-Step Forms
+
+Build multi-step forms by chaining multiple Form nodes together. Each Form node represents a page or step in your form
+sequence. Use the n8n Form Trigger node to start the workflow and display the first form page to the user.
+
+## Data Collection & Aggregation
+
+Collect and merge all user responses from each form step before writing to your destination (e.g., Google Sheets). Use
+Set or Merge nodes to combine data as needed. Make sure your JSON keys match the column names in your destination for
+automatic mapping.
+
+## Conditional Logic & Branching
+
+Use IF or Switch nodes to direct users to different form pages based on their previous answers. This enables dynamic
+form flows where the path changes based on user input, creating personalized form experiences.
+
+## Dynamic Form Fields
+
+For forms that require dynamic options (e.g., dropdowns populated from an API or previous step), generate the form
+definition in a Code node and pass it to the Form node as JSON. You can define forms using JSON for dynamic or
+conditional fields, and even generate form fields dynamically using a Code node if needed.
+
+## Input Validation
+
+Validate user input between steps to ensure data quality. If input is invalid, loop back to the relevant form step with
+an error message to guide the user to correct their submission. This prevents bad data from entering your system.
+
+## Recommended Nodes
+
+### n8n Form Trigger (n8n-nodes-base.formTrigger)
+
+Purpose: Starts the workflow and displays the first form page to the user
+
+Pitfalls:
+
+- Use the Production URL for live forms; the Test URL is for development and debugging only
+- Ensure the form trigger is properly configured before sharing URLs with users
+
+### n8n Form (n8n-nodes-base.form)
+
+Purpose: Displays form pages in multi-step form sequences
+
+Pitfalls:
+
+- Each Form node represents one page/step in your form
+- You can define forms using JSON for dynamic or conditional fields
+- Generate form fields dynamically using a Code node if needed for complex scenarios
+
+### Storage Nodes
+
+Purpose: Persist raw form data to a storage destination, preference should be for built-in n8n tables
+but use the most applicable node depending on the user's request.
+
+Required nodes (use at least one):
+- Data table (n8n-nodes-base.dataTable): Built-in n8n storage for quick setup
+- Google Sheets (n8n-nodes-base.googleSheets): Best for simple spreadsheet storage
+- Airtable (n8n-nodes-base.airtable): Best for structured database with relationships
+- Postgres (n8n-nodes-base.postgres) / MySQL (n8n-nodes-base.mySql) / MongoDB (n8n-nodes-base.mongoDb): For production database storage
+
+Pitfalls:
+
+- Every form workflow MUST include a storage node that actually writes data to a destination
+- Set and Merge nodes alone are NOT sufficient - they only transform data in memory
+- The storage node should be placed immediately after the form trigger (single-step) or after data aggregation (multi-step)
+
+### Code (n8n-nodes-base.code)
+
+Purpose: Processes form data, generates dynamic form definitions, or implements custom validation logic
+
+### Edit Fields (Set) (n8n-nodes-base.set)
+
+Purpose: Aggregates and transforms form data between steps (NOT for storage - use a storage node)
+
+### Merge (n8n-nodes-base.merge)
+
+Purpose: Combines data from multiple form steps into a single dataset (NOT for storage - use a storage node)
+
+Pitfalls:
+
+- Ensure data from all form steps is properly merged before writing to destination
+- Use appropriate merge modes (append, merge by key, etc.) for your use case
+- Remember: Merge prepares data but does not store it - add a storage node after Merge
+
+### If (n8n-nodes-base.if)
+
+Purpose: Routes users to different form pages based on their previous answers
+
+### Switch (n8n-nodes-base.switch)
+
+Purpose: Implements multi-path conditional routing in complex forms
+
+Pitfalls:
+
+- Include a default case to handle unexpected input values
+- Keep routing logic clear and maintainable
+
+## Common Pitfalls to Avoid
+
+### Missing Raw Form Response Storage
+
+When building n8n forms it is recommended to always store the raw form response to some form of data storage (Google Sheets, Airtable, etc.)
+for administration later. It is CRITICAL that, if you create an n8n form node, you store the raw output with a storage node.
+
+### Data Loss in Multi-Step Forms
+
+Aggregate all form step data using Set/Merge nodes before writing to your destination. Failing to merge data from multiple steps
+can result in incomplete form submissions being stored. After merging, ensure you write the complete dataset to a storage node.
+
+### Poor User Experience
+
+Use the Form Ending page type to show a completion message or redirect users after submission.
+Without a proper ending, users may be confused about whether their submission was successful.
+
+### Invalid Data
+
+Implement validation between form steps to catch errors early. Without validation, invalid data can
+propagate through your workflow and corrupt your destination data.
+
+### Complex Field Generation
+
+When generating dynamic form fields, ensure the JSON structure exactly matches what the Form
+node expects. Test thoroughly with the Test URL before going live.
+
+### Mapping Errors
+
+When writing to Google Sheets or other destinations, ensure field names match exactly. Mismatched names
+will cause data to be written to wrong columns or fail entirely.
+`;
+    getDocumentation() {
+        return this.documentation;
+    }
+}
+exports.FormInputBestPractices = FormInputBestPractices;
+//# sourceMappingURL=form-input.js.map
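
The form-input document recommends generating dynamic form fields in a Code node and feeding them to a Form node defined via JSON. A hedged sketch of that generation step; the field property names (fieldLabel, fieldType, fieldOptions) are an assumption based on the Form node's JSON definition format and should be verified against the current n8n Form node documentation:

// Code node sketch: build a dropdown from items returned by a previous step.
// The property names below are assumptions - double-check them against the
// Form node's "Define Form" (JSON) documentation before relying on this.
const products = $input.all().map((item) => item.json.name);

const formFields = [
  { fieldLabel: 'Your email', fieldType: 'email', requiredField: true },
  {
    fieldLabel: 'Product',
    fieldType: 'dropdown',
    fieldOptions: { values: products.map((name) => ({ option: name })) },
  },
];

// A following Form node defined via JSON can then reference these fields
// with an expression such as {{ $json.formFields }}.
return [{ json: { formFields } }];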

package/dist/tools/best-practices/form-input.js.map
@@ -0,0 +1 @@
{"version":3,"file":"form-input.js","sourceRoot":"","sources":["../../../src/tools/best-practices/form-input.ts"],"names":[],"mappings":";;;AACA,2DAA2D;AAE3D,MAAa,sBAAsB;IACzB,SAAS,GAAG,kCAAiB,CAAC,UAAU,CAAC;IACzC,OAAO,GAAG,OAAO,CAAC;IAEV,aAAa,GAAG;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA+JjC,CAAC;IAED,gBAAgB;QACf,OAAO,IAAI,CAAC,aAAa,CAAC;IAC3B,CAAC;CACD;AAxKD,wDAwKC"}

package/dist/tools/best-practices/index.js
@@ -0,0 +1,27 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.documentation = void 0;
+const categorization_1 = require("../../types/categorization");
+const chatbot_1 = require("./chatbot");
+const content_generation_1 = require("./content-generation");
+const data_extraction_1 = require("./data-extraction");
+const form_input_1 = require("./form-input");
+const scraping_and_research_1 = require("./scraping-and-research");
+exports.documentation = {
+    [categorization_1.WorkflowTechnique.SCRAPING_AND_RESEARCH]: new scraping_and_research_1.ScrapingAndResearchBestPractices(),
+    [categorization_1.WorkflowTechnique.CHATBOT]: new chatbot_1.ChatbotBestPractices(),
+    [categorization_1.WorkflowTechnique.CONTENT_GENERATION]: new content_generation_1.ContentGenerationBestPractices(),
+    [categorization_1.WorkflowTechnique.DATA_EXTRACTION]: new data_extraction_1.DataExtractionBestPractices(),
+    [categorization_1.WorkflowTechnique.FORM_INPUT]: new form_input_1.FormInputBestPractices(),
+    [categorization_1.WorkflowTechnique.DATA_ANALYSIS]: undefined,
+    [categorization_1.WorkflowTechnique.DATA_TRANSFORMATION]: undefined,
+    [categorization_1.WorkflowTechnique.DOCUMENT_PROCESSING]: undefined,
+    [categorization_1.WorkflowTechnique.ENRICHMENT]: undefined,
+    [categorization_1.WorkflowTechnique.HUMAN_IN_THE_LOOP]: undefined,
+    [categorization_1.WorkflowTechnique.KNOWLEDGE_BASE]: undefined,
+    [categorization_1.WorkflowTechnique.MONITORING]: undefined,
+    [categorization_1.WorkflowTechnique.NOTIFICATION]: undefined,
+    [categorization_1.WorkflowTechnique.SCHEDULING]: undefined,
+    [categorization_1.WorkflowTechnique.TRIAGE]: undefined,
+};
+//# sourceMappingURL=index.js.map
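
The new index.js registry maps every WorkflowTechnique to either a best-practices document instance or undefined. Consuming it is a lookup plus an undefined filter; a hedged TypeScript sketch (the helper and its import paths are illustrative, not part of the package API - the real consumer is get-best-practices.tool.js, whose body is not reproduced in this diff excerpt):

import { documentation } from './tools/best-practices';
import type { BestPracticesDocument } from './types/best-practices';
import type { WorkflowTechnique } from './types/categorization';

// Illustrative helper: collect the docs for the techniques identified during
// prompt categorization, skipping techniques whose registry entry is still
// undefined in this release (data_analysis, notification, and so on).
function collectBestPractices(techniques: WorkflowTechnique[]): string[] {
  return techniques
    .map((technique) => documentation[technique])
    .filter((doc): doc is BestPracticesDocument => doc !== undefined)
    .map((doc) => doc.getDocumentation());
}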

package/dist/tools/best-practices/index.js.map
@@ -0,0 +1 @@
{"version":3,"file":"index.js","sourceRoot":"","sources":["../../../src/tools/best-practices/index.ts"],"names":[],"mappings":";;;AACA,2DAAuF;AAEvF,uCAAiD;AACjD,6DAAsE;AACtE,uDAAgE;AAChE,6CAAsD;AACtD,mEAA2E;AAE9D,QAAA,aAAa,GAAqE;IAC9F,CAAC,kCAAiB,CAAC,qBAAqB,CAAC,EAAE,IAAI,wDAAgC,EAAE;IACjF,CAAC,kCAAiB,CAAC,OAAO,CAAC,EAAE,IAAI,8BAAoB,EAAE;IACvD,CAAC,kCAAiB,CAAC,kBAAkB,CAAC,EAAE,IAAI,mDAA8B,EAAE;IAC5E,CAAC,kCAAiB,CAAC,eAAe,CAAC,EAAE,IAAI,6CAA2B,EAAE;IACtE,CAAC,kCAAiB,CAAC,UAAU,CAAC,EAAE,IAAI,mCAAsB,EAAE;IAG5D,CAAC,kCAAiB,CAAC,aAAa,CAAC,EAAE,SAAS;IAC5C,CAAC,kCAAiB,CAAC,mBAAmB,CAAC,EAAE,SAAS;IAClD,CAAC,kCAAiB,CAAC,mBAAmB,CAAC,EAAE,SAAS;IAClD,CAAC,kCAAiB,CAAC,UAAU,CAAC,EAAE,SAAS;IACzC,CAAC,kCAAiB,CAAC,iBAAiB,CAAC,EAAE,SAAS;IAChD,CAAC,kCAAiB,CAAC,cAAc,CAAC,EAAE,SAAS;IAC7C,CAAC,kCAAiB,CAAC,UAAU,CAAC,EAAE,SAAS;IACzC,CAAC,kCAAiB,CAAC,YAAY,CAAC,EAAE,SAAS;IAC3C,CAAC,kCAAiB,CAAC,UAAU,CAAC,EAAE,SAAS;IACzC,CAAC,kCAAiB,CAAC,MAAM,CAAC,EAAE,SAAS;CACrC,CAAC"}

package/dist/tools/best-practices/scraping-and-research.d.ts
@@ -0,0 +1,7 @@
+import type { BestPracticesDocument } from '../../types/best-practices';
+export declare class ScrapingAndResearchBestPractices implements BestPracticesDocument {
+    readonly technique: "scraping_and_research";
+    readonly version = "1.0.0";
+    private readonly documentation;
+    getDocumentation(): string;
+}

package/dist/tools/best-practices/scraping-and-research.js
@@ -0,0 +1,147 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.ScrapingAndResearchBestPractices = void 0;
+const categorization_1 = require("../../types/categorization");
+class ScrapingAndResearchBestPractices {
+    technique = categorization_1.WorkflowTechnique.SCRAPING_AND_RESEARCH;
+    version = '1.0.0';
+    documentation = `# Best Practices: Scraping & Research Workflows
+
+## Performance & Resource Management
+
+Batch requests and introduce delays to avoid hitting API rate limits or overloading target servers. Use Wait nodes and
+batching options in HTTP Request nodes. When 429 rate limiting errors occur due to receiving too many requests,
+implement batching to reduce request frequency or use the "Retry on Fail" feature to automatically handle throttled
+responses.
+
+Workflows processing large datasets can crash due to memory constraints. Use the Split In Batches node to process 200
+rows at a time to reduce memory usage, leverage built-in nodes instead of custom code, and increase execution timeouts
+via environment variables for better resource management.
+
+## Looping & Pagination
+
+Implement robust looping for paginated data. Use Set, IF, and Code nodes to manage page numbers and loop conditions,
+ensuring you don't miss data or create infinite loops. Leverage n8n's built-in mechanisms rather than manual approaches:
+use the $runIndex variable to track iterations without additional code nodes, and employ workflow static data or node
+run indexes to maintain state across loop cycles.
+
+## Recommended Nodes
+
+### HTTP Request (n8n-nodes-base.httpRequest)
+
+Purpose: Fetches web pages or API data for scraping and research workflows
+
+Pitfalls:
+
+- Depending on the data which the user wishes to scrape/research, it may be against the terms of service to attempt to
+fetch it from the site directly. Using scraping nodes is the best way to get around this.
+
+Pitfalls:
+
+- Double-check URL formatting, query parameters, and ensure all required fields are present to avoid bad request errors
+- Be aware of 429 rate limiting errors when the service receives too many requests - implement batching or use "Retry on
+Fail" feature
+- Refresh expired tokens, verify API keys, and ensure correct permissions to avoid authentication failures
+
+### HTML Extract (n8n-nodes-base.htmlExtract)
+
+Purpose: Parses HTML and extracts data using CSS selectors for web scraping
+
+Pitfalls:
+
+- Some sites use JavaScript to render content, which may not be accessible via simple HTTP requests. Consider using
+browser automation tools or APIs if the HTML appears empty
+- Validate that the CSS selectors match the actual page structure to avoid extraction failures
+
+### Split Out (n8n-nodes-base.splitOut)
+
+Purpose: Processes lists of items one by one for sequential operations
+
+Pitfalls:
+- Can cause performance issues with very large datasets - consider using Split In Batches instead
+
+### Loop Over Items (Split in Batches) (n8n-nodes-base.splitInBatches)
+
+Purpose: Processes lists of items in batches to manage memory and performance
+
+Pitfalls:
+- Ensure proper loop configuration to avoid infinite loops or skipped data. The index 0
+(first connection) of the loop is treated as the done state, while the index 1 (second connection)
+is the connection that loops.
+- Use appropriate batch sizes (e.g., 200 rows) to balance memory usage and performance
+
+### Edit Fields (Set) (n8n-nodes-base.set)
+
+Purpose: Manipulates data, sets variables for loop control and state management
+
+### Code (n8n-nodes-base.code)
+
+Purpose: Implements custom logic for complex data transformations or pagination
+
+Pitfalls:
+
+- Prefer built-in nodes over custom code to reduce memory usage and improve maintainability
+- Avoid processing very large datasets in a single code execution - use batching
+
+### If (n8n-nodes-base.if)
+
+Purpose: Adds conditional logic for error handling, loop control, or data filtering
+
+Pitfalls:
+- Validate expressions carefully to avoid unexpected branching behavior
+
+### Wait (n8n-nodes-base.wait)
+
+Purpose: Introduces delays to respect rate limits and avoid overloading servers
+
+### Google Sheets (n8n-nodes-base.googleSheets)
+
+Purpose: Stores scraped data in spreadsheets for easy access and sharing
+
+### Microsoft Excel 365 (n8n-nodes-base.microsoftExcel)
+
+Purpose: Stores scraped data in Excel files for offline analysis
+
+### Airtable (n8n-nodes-base.airtable)
+
+Purpose: Saves structured data to a database with rich data types and relationships
+
+### AI Agent (@n8n/n8n-nodes-langchain.agent)
+
+Purpose: For research, summarization, and advanced data extraction. AI agents can autonomously gather information
+from websites, analyze content, and organize findings into structured formats, integrating tools for web scraping,
+content analysis, and database storage
+
+### Scraping Nodes
+
+- Phantombuster (n8n-nodes-base.phantombuster)
+- Apify (use HTTP Request or community node)
+- BrightData (use HTTP Request or community node)
+
+Purpose: If the user wishes to scrape data from sites like LinkedIn, Facebook, Instagram, Twitter/X, Indeed, Glassdoor
+or any other service similar to these large providers it is better to use a node designed for this. The scraping
+nodes provide access to these datasets while avoiding issues like rate limiting or breaking terms of service for
+sites like these.
+
+## Common Pitfalls to Avoid
+
+Bad Request Errors: Double-check URL formatting, query parameters, and ensure all required fields are present to
+avoid 400 errors when making HTTP requests.
+
+Rate Limits: Use batching and Wait nodes to avoid 429 errors. When the service receives too many requests, implement
+batching to reduce request frequency or use the "Retry on Fail" feature.
+
+Memory Issues: Avoid processing very large datasets in a single run; use batching and increase server resources if
+needed. Use Split In Batches node to process 200 rows at a time, leverage built-in nodes instead of custom code, and
+increase execution timeouts via environment variables.
+
+Empty or Unexpected Data: Some sites use JavaScript to render content, which may not be accessible via simple HTTP
+requests. Standard HTTP and HTML parsing nodes fail because sites load data asynchronously via JavaScript, leaving the
+initial HTML empty of actual content. Web scraping nodes can be used to avoid this.
+`;
+    getDocumentation() {
+        return this.documentation;
+    }
+}
+exports.ScrapingAndResearchBestPractices = ScrapingAndResearchBestPractices;
+//# sourceMappingURL=scraping-and-research.js.map
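
Both the memory and rate-limit advice above come down to batching. The Loop Over Items node handles this natively; when a custom split is needed, the same chunking can be sketched in a Code node (the batch size of 200 mirrors the guidance, everything else is illustrative):

// Code node sketch: chunk incoming items into batches of 200 before handing
// them to a sub-workflow or a rate-limited HTTP Request step.
const BATCH_SIZE = 200;
const items = $input.all();

const batches = [];
for (let i = 0; i < items.length; i += BATCH_SIZE) {
  batches.push(items.slice(i, i + BATCH_SIZE));
}

// Emit one item per batch; an Execute Sub-workflow node downstream can then
// process each batch on its own, keeping memory usage bounded.
return batches.map((batch, index) => ({
  json: { batchIndex: index, rows: batch.map((item) => item.json) },
}));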

package/dist/tools/best-practices/scraping-and-research.js.map
@@ -0,0 +1 @@
{"version":3,"file":"scraping-and-research.js","sourceRoot":"","sources":["../../../src/tools/best-practices/scraping-and-research.ts"],"names":[],"mappings":";;;AACA,2DAA2D;AAE3D,MAAa,gCAAgC;IACnC,SAAS,GAAG,kCAAiB,CAAC,qBAAqB,CAAC;IACpD,OAAO,GAAG,OAAO,CAAC;IAEV,aAAa,GAAG;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAqIjC,CAAC;IAED,gBAAgB;QACf,OAAO,IAAI,CAAC,aAAa,CAAC;IAC3B,CAAC;CACD;AA9ID,4EA8IC"}

package/dist/tools/builder-tools.js
@@ -3,7 +3,9 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.getBuilderTools = getBuilderTools;
 exports.getBuilderToolsForDisplay = getBuilderToolsForDisplay;
 const add_node_tool_1 = require("./add-node.tool");
+const categorize_prompt_tool_1 = require("./categorize-prompt.tool");
 const connect_nodes_tool_1 = require("./connect-nodes.tool");
+const get_best_practices_tool_1 = require("./get-best-practices.tool");
 const get_node_parameter_tool_1 = require("./get-node-parameter.tool");
 const node_details_tool_1 = require("./node-details.tool");
 const node_search_tool_1 = require("./node-search.tool");
@@ -13,6 +15,8 @@ const update_node_parameters_tool_1 = require("./update-node-parameters.tool");
 const validate_workflow_tool_1 = require("./validate-workflow.tool");
 function getBuilderTools({ parsedNodeTypes, logger, llmComplexTask, instanceUrl, }) {
     return [
+        (0, categorize_prompt_tool_1.createCategorizePromptTool)(llmComplexTask, logger),
+        (0, get_best_practices_tool_1.createGetBestPracticesTool)(),
         (0, node_search_tool_1.createNodeSearchTool)(parsedNodeTypes),
         (0, node_details_tool_1.createNodeDetailsTool)(parsedNodeTypes),
         (0, add_node_tool_1.createAddNodeTool)(parsedNodeTypes),
@@ -26,6 +30,8 @@ function getBuilderTools({ parsedNodeTypes, logger, llmComplexTask, instanceUrl,
 }
 function getBuilderToolsForDisplay({ nodeTypes, }) {
     return [
+        categorize_prompt_tool_1.CATEGORIZE_PROMPT_TOOL,
+        get_best_practices_tool_1.GET_BEST_PRACTICES_TOOL,
         node_search_tool_1.NODE_SEARCH_TOOL,
         node_details_tool_1.NODE_DETAILS_TOOL,
         (0, add_node_tool_1.getAddNodeToolBase)(nodeTypes),
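
The builder-tools change registers the two new tools ahead of the existing ones, so the agent can categorize the prompt and pull best-practices documentation before it starts searching for nodes. Each entry returned by getBuilderTools is a BuilderTool wrapper ({ tool, toolName, displayTitle }); a hedged sketch of how a consumer might unwrap them (the helper and import path are illustrative, not package API):

import { getBuilderTools } from './tools/builder-tools';

// Illustrative: getBuilderTools takes the options object destructured in the
// diff above (parsedNodeTypes, logger, llmComplexTask, instanceUrl) and
// returns BuilderTool wrappers; the LangChain tools live on the .tool property.
function toLangchainTools(options: Parameters<typeof getBuilderTools>[0]) {
  return getBuilderTools(options).map((builderTool) => builderTool.tool);
}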

package/dist/tools/builder-tools.js.map
@@ -1 +1 @@
-
{"version":3,"file":"builder-tools.js","sourceRoot":"","sources":["../../src/tools/builder-tools.ts"],"names":[],"mappings":";;
+
{"version":3,"file":"builder-tools.js","sourceRoot":"","sources":["../../src/tools/builder-tools.ts"],"names":[],"mappings":";;AAqBA,0CAwBC;AAOD,8DAgBC;AA9DD,mDAAwE;AACxE,qEAA8F;AAC9F,6DAAkF;AAClF,uEAAgG;AAChG,uEAAgG;AAChG,2DAA+E;AAC/E,yDAA4E;AAC5E,qEAA8F;AAC9F,yDAA4E;AAC5E,+EAGuC;AACvC,qEAA8F;AAE9F,SAAgB,eAAe,CAAC,EAC/B,eAAe,EACf,MAAM,EACN,cAAc,EACd,WAAW,GAMX;IACA,OAAO;QACN,IAAA,mDAA0B,EAAC,cAAc,EAAE,MAAM,CAAC;QAClD,IAAA,oDAA0B,GAAE;QAC5B,IAAA,uCAAoB,EAAC,eAAe,CAAC;QACrC,IAAA,yCAAqB,EAAC,eAAe,CAAC;QACtC,IAAA,iCAAiB,EAAC,eAAe,CAAC;QAClC,IAAA,2CAAsB,EAAC,eAAe,EAAE,MAAM,CAAC;QAC/C,IAAA,mDAA0B,EAAC,MAAM,CAAC;QAClC,IAAA,uCAAoB,EAAC,MAAM,CAAC;QAC5B,IAAA,4DAA8B,EAAC,eAAe,EAAE,cAAc,EAAE,MAAM,EAAE,WAAW,CAAC;QACpF,IAAA,oDAA0B,GAAE;QAC5B,IAAA,mDAA0B,EAAC,eAAe,EAAE,MAAM,CAAC;KACnD,CAAC;AACH,CAAC;AAOD,SAAgB,yBAAyB,CAAC,EACzC,SAAS,GAC8B;IACvC,OAAO;QACN,+CAAsB;QACtB,iDAAuB;QACvB,mCAAgB;QAChB,qCAAiB;QACjB,IAAA,kCAAkB,EAAC,SAAS,CAAC;QAC7B,uCAAkB;QAClB,+CAAsB;QACtB,mCAAgB;QAChB,0DAA4B;QAC5B,iDAAuB;QACvB,+CAAsB;KACtB,CAAC;AACH,CAAC"}

package/dist/tools/categorize-prompt.tool.d.ts
@@ -0,0 +1,5 @@
+import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import type { Logger } from '@n8n/backend-common';
+import type { BuilderTool, BuilderToolBase } from '../utils/stream-processor';
+export declare const CATEGORIZE_PROMPT_TOOL: BuilderToolBase;
+export declare function createCategorizePromptTool(llm: BaseChatModel, logger?: Logger): BuilderTool;

package/dist/tools/categorize-prompt.tool.js
@@ -0,0 +1,84 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.CATEGORIZE_PROMPT_TOOL = void 0;
+exports.createCategorizePromptTool = createCategorizePromptTool;
+const tools_1 = require("@langchain/core/tools");
+const zod_1 = require("zod");
+const prompt_categorization_1 = require("../chains/prompt-categorization");
+const errors_1 = require("../errors");
+const progress_1 = require("../tools/helpers/progress");
+const response_1 = require("../tools/helpers/response");
+const categorizePromptSchema = zod_1.z.object({
+    prompt: zod_1.z.string().min(1).describe('The user prompt to categorize'),
+});
+function buildCategorizationMessage(categorization) {
+    const parts = [];
+    parts.push('Prompt categorized');
+    if (categorization.techniques.length > 0) {
+        parts.push(`- Techniques: ${categorization.techniques.join(', ')}`);
+    }
+    if (categorization.confidence !== undefined) {
+        parts.push(`- Confidence: ${(categorization.confidence * 100).toFixed(0)}%`);
+    }
+    return parts.join('\n');
+}
+exports.CATEGORIZE_PROMPT_TOOL = {
+    toolName: 'categorize_prompt',
+    displayTitle: 'Categorizing prompt',
+};
+function createCategorizePromptTool(llm, logger) {
+    const dynamicTool = (0, tools_1.tool)(async (input, config) => {
+        const reporter = (0, progress_1.createProgressReporter)(config, exports.CATEGORIZE_PROMPT_TOOL.toolName, exports.CATEGORIZE_PROMPT_TOOL.displayTitle);
+        try {
+            const validatedInput = categorizePromptSchema.parse(input);
+            const { prompt } = validatedInput;
+            reporter.start(validatedInput);
+            logger?.debug('Categorizing user prompt using LLM...');
+            reporter.progress('Analyzing prompt to identify use case and techniques...');
+            const categorization = await (0, prompt_categorization_1.promptCategorizationChain)(llm, prompt);
+            logger?.debug('Prompt categorized', {
+                techniques: categorization.techniques,
+                confidence: categorization.confidence,
+            });
+            const output = {
+                categorization,
+            };
+            reporter.complete(output);
+            return (0, response_1.createSuccessResponse)(config, buildCategorizationMessage(categorization), {
+                categorization,
+            });
+        }
+        catch (error) {
+            if (error instanceof zod_1.z.ZodError) {
+                const validationError = new errors_1.ValidationError('Invalid input parameters', {
+                    extra: { errors: error.errors },
+                });
+                reporter.error(validationError);
+                return (0, response_1.createErrorResponse)(config, validationError);
+            }
+            const toolError = new errors_1.ToolExecutionError(error instanceof Error ? error.message : 'Unknown error occurred', {
+                toolName: exports.CATEGORIZE_PROMPT_TOOL.toolName,
+                cause: error instanceof Error ? error : undefined,
+            });
+            reporter.error(toolError);
+            return (0, response_1.createErrorResponse)(config, toolError);
+        }
+    }, {
+        name: exports.CATEGORIZE_PROMPT_TOOL.toolName,
+        description: `Categorize a user's workflow request to identify the use case and required techniques.
+
+This helps understand what type of workflow the user wants to build and which automation patterns will be needed.
+
+Use this tool when you receive an initial workflow request to:
+- Detect required techniques (e.g., scraping, data transformation, notifications)
+- Better understand the user's needs and context
+
+The categorization allows retrieving relevant best practice documentation to improve workflow structure and node selection.`,
+        schema: categorizePromptSchema,
+    });
+    return {
+        tool: dynamicTool,
+        ...exports.CATEGORIZE_PROMPT_TOOL,
+    };
+}
+//# sourceMappingURL=categorize-prompt.tool.js.map
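
categorize-prompt.tool.js wraps promptCategorizationChain in a LangChain tool() with a zod schema, so it can also be exercised directly. A hedged sketch of standalone usage; the model choice is arbitrary, and the config expected by createProgressReporter is not shown in this diff, so a bare invocation like this may need extra wiring compared to how the agent calls it:

import { ChatOpenAI } from '@langchain/openai';
import { createCategorizePromptTool } from './tools/categorize-prompt.tool';

// Illustrative usage; inside the package the tool is registered via
// getBuilderTools and invoked by the workflow builder agent.
const llm = new ChatOpenAI({ model: 'gpt-4o-mini' });
const { tool: categorizePrompt } = createCategorizePromptTool(llm);

// The schema requires a non-empty prompt string; the exact return shape
// depends on createSuccessResponse, which this diff does not show.
const result = await categorizePrompt.invoke({
  prompt: 'Scrape new job postings every morning and post a summary to Slack',
});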

package/dist/tools/categorize-prompt.tool.js.map
@@ -0,0 +1 @@
{"version":3,"file":"categorize-prompt.tool.js","sourceRoot":"","sources":["../../src/tools/categorize-prompt.tool.ts"],"names":[],"mappings":";;;AAsCA,gEAwEC;AA7GD,iDAA6C;AAE7C,6BAAwB;AAExB,0EAA2E;AAC3E,qCAA+D;AAC/D,uDAAkE;AAClE,uDAAsF;AAKtF,MAAM,sBAAsB,GAAG,OAAC,CAAC,MAAM,CAAC;IACvC,MAAM,EAAE,OAAC,CAAC,MAAM,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,+BAA+B,CAAC;CACnE,CAAC,CAAC;AAEH,SAAS,0BAA0B,CAAC,cAAoC;IACvE,MAAM,KAAK,GAAa,EAAE,CAAC;IAE3B,KAAK,CAAC,IAAI,CAAC,oBAAoB,CAAC,CAAC;IAEjC,IAAI,cAAc,CAAC,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;QAC1C,KAAK,CAAC,IAAI,CAAC,iBAAiB,cAAc,CAAC,UAAU,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC;IACrE,CAAC;IAED,IAAI,cAAc,CAAC,UAAU,KAAK,SAAS,EAAE,CAAC;QAC7C,KAAK,CAAC,IAAI,CAAC,iBAAiB,CAAC,cAAc,CAAC,UAAU,GAAG,GAAG,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC;IAC9E,CAAC;IAED,OAAO,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;AACzB,CAAC;AAEY,QAAA,sBAAsB,GAAoB;IACtD,QAAQ,EAAE,mBAAmB;IAC7B,YAAY,EAAE,qBAAqB;CACnC,CAAC;AAEF,SAAgB,0BAA0B,CAAC,GAAkB,EAAE,MAAe;IAC7E,MAAM,WAAW,GAAG,IAAA,YAAI,EACvB,KAAK,EAAE,KAAK,EAAE,MAAM,EAAE,EAAE;QACvB,MAAM,QAAQ,GAAG,IAAA,iCAAsB,EACtC,MAAM,EACN,8BAAsB,CAAC,QAAQ,EAC/B,8BAAsB,CAAC,YAAY,CACnC,CAAC;QAEF,IAAI,CAAC;YACJ,MAAM,cAAc,GAAG,sBAAsB,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC;YAC3D,MAAM,EAAE,MAAM,EAAE,GAAG,cAAc,CAAC;YAElC,QAAQ,CAAC,KAAK,CAAC,cAAc,CAAC,CAAC;YAE/B,MAAM,EAAE,KAAK,CAAC,uCAAuC,CAAC,CAAC;YACvD,QAAQ,CAAC,QAAQ,CAAC,yDAAyD,CAAC,CAAC;YAE7E,MAAM,cAAc,GAAG,MAAM,IAAA,iDAAyB,EAAC,GAAG,EAAE,MAAM,CAAC,CAAC;YAEpE,MAAM,EAAE,KAAK,CAAC,oBAAoB,EAAE;gBACnC,UAAU,EAAE,cAAc,CAAC,UAAU;gBACrC,UAAU,EAAE,cAAc,CAAC,UAAU;aACrC,CAAC,CAAC;YAEH,MAAM,MAAM,GAA2B;gBACtC,cAAc;aACd,CAAC;YACF,QAAQ,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC;YAE1B,OAAO,IAAA,gCAAqB,EAAC,MAAM,EAAE,0BAA0B,CAAC,cAAc,CAAC,EAAE;gBAChF,cAAc;aACd,CAAC,CAAC;QACJ,CAAC;QAAC,OAAO,KAAK,EAAE,CAAC;YAChB,IAAI,KAAK,YAAY,OAAC,CAAC,QAAQ,EAAE,CAAC;gBACjC,MAAM,eAAe,GAAG,IAAI,wBAAe,CAAC,0BAA0B,EAAE;oBACvE,KAAK,EAAE,EAAE,MAAM,EAAE,KAAK,CAAC,MAAM,EAAE;iBAC/B,CAAC,CAAC;gBACH,QAAQ,CAAC,KAAK,CAAC,eAAe,CAAC,CAAC;gBAChC,OAAO,IAAA,8BAAmB,EAAC,MAAM,EAAE,eAAe,CAAC,CAAC;YACrD,CAAC;YAED,MAAM,SAAS,GAAG,IAAI,2BAAkB,CACvC,KAAK,YAAY,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,wBAAwB,EACjE;gBACC,QAAQ,EAAE,8BAAsB,CAAC,QAAQ;gBACzC,KAAK,EAAE,KAAK,YAAY,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,SAAS;aACjD,CACD,CAAC;YACF,QAAQ,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC;YAC1B,OAAO,IAAA,8BAAmB,EAAC,MAAM,EAAE,SAAS,CAAC,CAAC;QAC/C,CAAC;IACF,CAAC,EACD;QACC,IAAI,EAAE,8BAAsB,CAAC,QAAQ;QACrC,WAAW,EAAE;;;;;;;;4HAQ4G;QACzH,MAAM,EAAE,sBAAsB;KAC9B,CACD,CAAC;IAEF,OAAO;QACN,IAAI,EAAE,WAAW;QACjB,GAAG,8BAAsB;KACzB,CAAC;AACH,CAAC"}

package/dist/tools/engines/node-search-engine.d.ts
@@ -1,12 +1,6 @@
 import type { INodeTypeDescription, NodeConnectionType } from 'n8n-workflow';
 import type { NodeSearchResult } from '../../types/nodes';
 export declare const SCORE_WEIGHTS: {
-    readonly NAME_CONTAINS: 10;
-    readonly DISPLAY_NAME_CONTAINS: 8;
-    readonly DESCRIPTION_CONTAINS: 5;
-    readonly ALIAS_CONTAINS: 8;
-    readonly NAME_EXACT: 20;
-    readonly DISPLAY_NAME_EXACT: 15;
     readonly CONNECTION_EXACT: 100;
     readonly CONNECTION_IN_EXPRESSION: 50;
 };
@@ -16,10 +10,7 @@ export declare class NodeSearchEngine {
     searchByName(query: string, limit?: number): NodeSearchResult[];
     searchByConnectionType(connectionType: NodeConnectionType, limit?: number, nameFilter?: string): NodeSearchResult[];
     formatResult(result: NodeSearchResult): string;
-    private calculateNameScore;
     private getConnectionScore;
-    private createSearchResult;
-    private sortAndLimit;
     static isAiConnectionType(connectionType: string): boolean;
     static getAiConnectionTypes(): NodeConnectionType[];
 }
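
The node-search-engine.d.ts change drops the name-based score weights and several private helpers, leaving only the connection-type weights. The implementation of getConnectionScore is not shown in this diff; purely as an illustration of what weights of 100 and 50 can mean, a node whose outputs list a connection type exactly would outrank one that only mentions it inside an outputs expression:

// Illustrative only - not the package's implementation. A node type's outputs
// are either a static array of connection types or an expression string that
// is evaluated at runtime; an exact entry is stronger evidence than a
// substring match inside the expression.
function scoreConnection(outputs: string[] | string, connectionType: string): number {
  if (Array.isArray(outputs)) {
    return outputs.includes(connectionType) ? 100 : 0; // CONNECTION_EXACT
  }
  return outputs.includes(connectionType) ? 50 : 0; // CONNECTION_IN_EXPRESSION
}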