n8n-nodes-berget-mk 0.2.1 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31) hide show
  1. package/README.md +16 -16
  2. package/dist/nodes/BergetAi/BergetAi.node.d.ts +13 -0
  3. package/dist/nodes/BergetAi/BergetAi.node.js +144 -0
  4. package/dist/nodes/BergetAi/chat.d.ts +3 -0
  5. package/dist/nodes/BergetAi/chat.js +139 -0
  6. package/dist/nodes/BergetAi/embeddings.d.ts +3 -0
  7. package/dist/nodes/BergetAi/embeddings.js +71 -0
  8. package/dist/nodes/BergetAi/ocr.d.ts +3 -0
  9. package/dist/nodes/BergetAi/ocr.js +196 -0
  10. package/dist/nodes/BergetAi/rerank.d.ts +3 -0
  11. package/dist/nodes/BergetAi/rerank.js +108 -0
  12. package/dist/nodes/BergetAi/shared.d.ts +17 -0
  13. package/dist/nodes/BergetAi/shared.js +46 -0
  14. package/dist/nodes/BergetAi/speech.d.ts +3 -0
  15. package/dist/nodes/BergetAi/speech.js +108 -0
  16. package/package.json +4 -8
  17. package/dist/nodes/BergetAiChat/BergetAiChat.node.d.ts +0 -10
  18. package/dist/nodes/BergetAiChat/BergetAiChat.node.js +0 -271
  19. package/dist/nodes/BergetAiEmbeddings/BergetAiEmbeddings.node.d.ts +0 -10
  20. package/dist/nodes/BergetAiEmbeddings/BergetAiEmbeddings.node.js +0 -179
  21. package/dist/nodes/BergetAiEmbeddings/bergetai.svg +0 -3
  22. package/dist/nodes/BergetAiOcr/BergetAiOcr.node.d.ts +0 -5
  23. package/dist/nodes/BergetAiOcr/BergetAiOcr.node.js +0 -322
  24. package/dist/nodes/BergetAiOcr/bergetai.svg +0 -3
  25. package/dist/nodes/BergetAiRerank/BergetAiRerank.node.d.ts +0 -10
  26. package/dist/nodes/BergetAiRerank/BergetAiRerank.node.js +0 -212
  27. package/dist/nodes/BergetAiRerank/bergetai.svg +0 -3
  28. package/dist/nodes/BergetAiSpeech/BergetAiSpeech.node.d.ts +0 -10
  29. package/dist/nodes/BergetAiSpeech/BergetAiSpeech.node.js +0 -211
  30. package/dist/nodes/BergetAiSpeech/bergetai.svg +0 -3
  31. /package/dist/nodes/{BergetAiChat → BergetAi}/bergetai.svg +0 -0
package/README.md CHANGED
@@ -1,19 +1,13 @@
1
1
  # n8n-nodes-berget-mk
2
2
 
3
- n8n community nodes for [Berget AI](https://berget.ai), packaged as a single installable module. Maintained by Micke Kring.
3
+ n8n community node for [Berget AI](https://berget.ai), packaged as a single installable module. Maintained by Micke Kring.
4
4
 
5
- Includes:
5
+ Two nodes, two purposes:
6
6
 
7
- - **Berget AI Chat** — one-shot chat completions (action node)
8
- - **Berget AI Chat Model** — sub-node that plugs into n8n's built-in **AI Agent**, **Basic LLM Chain**, and other LangChain-based nodes. Exposes `reasoning_effort` and the full standard LLM parameter set.
9
- - **Berget AI Embeddings** — create text embeddings
10
- - **Berget AI OCR** — document text extraction (PDF, DOCX, images)
11
- - **Berget AI Speech-to-Text** — audio transcription (KB-Whisper for Swedish)
12
- - **Berget AI Rerank** — document reranking
7
+ - **Berget AI** — a multi-resource action node with five resources: **Chat**, **Embeddings**, **OCR**, **Rerank**, **Speech to Text**. Use this for one-shot calls.
8
+ - **Berget AI Chat Model** — a sub-node that plugs into n8n's built-in **AI Agent**, **Basic LLM Chain**, and any other LangChain-based node. Use this to drive an agent with Berget AI as the underlying LLM. Exposes `reasoning_effort` and the full standard LLM parameter set.
13
9
 
14
- > ⚠️ **Experimental — actively developed.** This package is pre-1.0 and the shape of individual nodes may change between minor releases. Pin a specific version in production workflows until `1.0.0`.
15
- >
16
- > **Breaking change in `0.2.0`:** the home-built `Berget AI Agent` node was removed and replaced with a `Berget AI Chat Model` sub-node that plugs into n8n's built-in **AI Agent**. See [CHANGELOG.md](CHANGELOG.md) for migration notes.
10
+ > ⚠️ **Experimental — actively developed.** This package is pre-1.0 and the shape of nodes may change between minor releases. Pin a specific version in production workflows until `1.0.0`. See [CHANGELOG.md](CHANGELOG.md) for breaking changes.
17
11
 
18
12
  ## Install
19
13
 
@@ -25,13 +19,19 @@ n8n-nodes-berget-mk
25
19
 
26
20
  Then add a **Berget AI API** credential with your API key from [berget.ai](https://berget.ai).
27
21
 
22
+ ## Using the Berget AI action node
23
+
24
+ 1. Drop **Berget AI** onto the canvas.
25
+ 2. Select a **Resource** (Chat, Embeddings, OCR, Rerank, or Speech to Text).
26
+ 3. Fill in the resource-specific fields (model, input, options).
27
+ 4. Execute.
28
+
28
29
  ## Using Berget with n8n's AI Agent
29
30
 
30
- 1. Add n8n's built-in **AI Agent** node to your workflow.
31
- 2. Add a **Berget AI Chat Model** node and drag it onto the canvas below the Agent.
32
- 3. Connect it to the Agent's **Chat Model** socket.
33
- 4. Select a Berget AI chat model (the list loads live from the API).
34
- 5. Optionally add Memory and Tool sub-nodes — they work as normal with Berget as the underlying LLM.
31
+ 1. Add n8n's built-in **AI Agent** node.
32
+ 2. Add a **Berget AI Chat Model** node and connect it to the Agent's **Chat Model** socket.
33
+ 3. Pick a Berget AI chat model and optionally configure `reasoning_effort` for reasoning-capable models (GPT-OSS, DeepSeek R1).
34
+ 4. Add Memory and Tool sub-nodes as usual — they work with Berget as the underlying LLM.
35
35
 
36
36
  ## Changelog
37
37
 
@@ -0,0 +1,13 @@
1
+ import { type IExecuteFunctions, type ILoadOptionsFunctions, type INodeExecutionData, type INodePropertyOptions, type INodeType, type INodeTypeDescription } from 'n8n-workflow';
2
+ export declare class BergetAi implements INodeType {
3
+ description: INodeTypeDescription;
4
+ methods: {
5
+ loadOptions: {
6
+ getChatModels(this: ILoadOptionsFunctions): Promise<INodePropertyOptions[]>;
7
+ getEmbeddingsModels(this: ILoadOptionsFunctions): Promise<INodePropertyOptions[]>;
8
+ getRerankModels(this: ILoadOptionsFunctions): Promise<INodePropertyOptions[]>;
9
+ getSpeechModels(this: ILoadOptionsFunctions): Promise<INodePropertyOptions[]>;
10
+ };
11
+ };
12
+ execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]>;
13
+ }
@@ -0,0 +1,144 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.BergetAi = void 0;
4
+ const n8n_workflow_1 = require("n8n-workflow");
5
+ const n8n_workflow_2 = require("n8n-workflow");
6
+ const chat_1 = require("./chat");
7
+ const embeddings_1 = require("./embeddings");
8
+ const ocr_1 = require("./ocr");
9
+ const rerank_1 = require("./rerank");
10
+ const speech_1 = require("./speech");
11
+ const shared_1 = require("./shared");
12
+ class BergetAi {
13
+ constructor() {
14
+ this.description = {
15
+ displayName: 'Berget AI',
16
+ name: 'bergetAi',
17
+ icon: 'file:bergetai.svg',
18
+ group: ['transform'],
19
+ version: 1,
20
+ subtitle: '={{$parameter["resource"]}}',
21
+ description: 'Use Berget AI for chat completions, embeddings, document OCR, speech-to-text, and document reranking',
22
+ defaults: { name: 'Berget AI' },
23
+ codex: {
24
+ categories: ['AI'],
25
+ subcategories: {
26
+ AI: ['Miscellaneous'],
27
+ },
28
+ alias: ['Berget', 'Berget AI', 'Swedish AI', 'LLM'],
29
+ },
30
+ credentials: [
31
+ {
32
+ name: 'bergetAiApi',
33
+ required: true,
34
+ },
35
+ ],
36
+ inputs: [n8n_workflow_1.NodeConnectionTypes.Main],
37
+ outputs: [n8n_workflow_1.NodeConnectionTypes.Main],
38
+ properties: [
39
+ {
40
+ displayName: 'Resource',
41
+ name: 'resource',
42
+ type: 'options',
43
+ noDataExpression: true,
44
+ default: 'chat',
45
+ options: [
46
+ {
47
+ name: 'Chat',
48
+ value: 'chat',
49
+ description: 'Create a chat completion',
50
+ },
51
+ {
52
+ name: 'Embeddings',
53
+ value: 'embeddings',
54
+ description: 'Generate vector embeddings from text',
55
+ },
56
+ {
57
+ name: 'OCR',
58
+ value: 'ocr',
59
+ description: 'Extract text from a document (PDF, DOCX, images)',
60
+ },
61
+ {
62
+ name: 'Rerank',
63
+ value: 'rerank',
64
+ description: 'Rerank documents by relevance to a query',
65
+ },
66
+ {
67
+ name: 'Speech to Text',
68
+ value: 'speech',
69
+ description: 'Transcribe audio to text',
70
+ },
71
+ ],
72
+ },
73
+ ...chat_1.chatProperties,
74
+ ...embeddings_1.embeddingsProperties,
75
+ ...ocr_1.ocrProperties,
76
+ ...rerank_1.rerankProperties,
77
+ ...speech_1.speechProperties,
78
+ ],
79
+ };
80
+ this.methods = {
81
+ loadOptions: {
82
+ async getChatModels() {
83
+ return (0, shared_1.loadModelOptions)(this, (m) => m.model_type === 'text' || m.model_type === 'ocr');
84
+ },
85
+ async getEmbeddingsModels() {
86
+ return (0, shared_1.loadModelOptions)(this, (m) => m.model_type === 'embedding');
87
+ },
88
+ async getRerankModels() {
89
+ return (0, shared_1.loadModelOptions)(this, (m) => m.model_type === 'rerank');
90
+ },
91
+ async getSpeechModels() {
92
+ return (0, shared_1.loadModelOptions)(this, (m) => m.model_type === 'speech-to-text');
93
+ },
94
+ },
95
+ };
96
+ }
97
+ async execute() {
98
+ const items = this.getInputData();
99
+ const returnData = [];
100
+ for (let i = 0; i < items.length; i++) {
101
+ try {
102
+ const resource = this.getNodeParameter('resource', i);
103
+ let result;
104
+ switch (resource) {
105
+ case 'chat':
106
+ result = await (0, chat_1.executeChat)(this, i);
107
+ break;
108
+ case 'embeddings':
109
+ result = await (0, embeddings_1.executeEmbeddings)(this, i);
110
+ break;
111
+ case 'ocr':
112
+ result = await (0, ocr_1.executeOcr)(this, i);
113
+ break;
114
+ case 'rerank':
115
+ result = await (0, rerank_1.executeRerank)(this, i);
116
+ break;
117
+ case 'speech':
118
+ result = await (0, speech_1.executeSpeech)(this, i);
119
+ break;
120
+ default:
121
+ throw new n8n_workflow_2.NodeOperationError(this.getNode(), `Unknown resource: ${resource}`, { itemIndex: i });
122
+ }
123
+ returnData.push({
124
+ json: result,
125
+ pairedItem: { item: i },
126
+ });
127
+ }
128
+ catch (error) {
129
+ if (this.continueOnFail()) {
130
+ returnData.push({
131
+ json: {
132
+ error: error instanceof Error ? error.message : String(error),
133
+ },
134
+ pairedItem: { item: i },
135
+ });
136
+ continue;
137
+ }
138
+ throw error;
139
+ }
140
+ }
141
+ return [returnData];
142
+ }
143
+ }
144
+ exports.BergetAi = BergetAi;
@@ -0,0 +1,3 @@
1
+ import type { IDataObject, IExecuteFunctions, INodeProperties } from 'n8n-workflow';
2
+ export declare const chatProperties: INodeProperties[];
3
+ export declare function executeChat(context: IExecuteFunctions, itemIndex: number): Promise<IDataObject>;
@@ -0,0 +1,139 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.chatProperties = void 0;
4
+ exports.executeChat = executeChat;
5
+ const n8n_workflow_1 = require("n8n-workflow");
6
+ const shared_1 = require("./shared");
7
+ const showForChat = {
8
+ displayOptions: {
9
+ show: {
10
+ resource: ['chat'],
11
+ },
12
+ },
13
+ };
14
+ exports.chatProperties = [
15
+ {
16
+ displayName: 'Model',
17
+ name: 'chatModel',
18
+ type: 'options',
19
+ typeOptions: { loadOptionsMethod: 'getChatModels' },
20
+ default: '',
21
+ required: true,
22
+ description: 'The Berget AI chat model to use',
23
+ ...showForChat,
24
+ },
25
+ {
26
+ displayName: 'Messages',
27
+ name: 'chatMessages',
28
+ type: 'fixedCollection',
29
+ typeOptions: { multipleValues: true },
30
+ default: {
31
+ values: [{ role: 'user', content: '' }],
32
+ },
33
+ options: [
34
+ {
35
+ displayName: 'Values',
36
+ name: 'values',
37
+ values: [
38
+ {
39
+ displayName: 'Role',
40
+ name: 'role',
41
+ type: 'options',
42
+ options: [
43
+ { name: 'System', value: 'system' },
44
+ { name: 'User', value: 'user' },
45
+ { name: 'Assistant', value: 'assistant' },
46
+ ],
47
+ default: 'user',
48
+ },
49
+ {
50
+ displayName: 'Content',
51
+ name: 'content',
52
+ type: 'string',
53
+ typeOptions: { rows: 2 },
54
+ default: '',
55
+ description: 'Message content',
56
+ },
57
+ ],
58
+ },
59
+ ],
60
+ ...showForChat,
61
+ },
62
+ {
63
+ displayName: 'Options',
64
+ name: 'chatOptions',
65
+ type: 'collection',
66
+ placeholder: 'Add Option',
67
+ default: {},
68
+ options: [
69
+ {
70
+ displayName: 'Max Tokens',
71
+ name: 'max_tokens',
72
+ type: 'number',
73
+ typeOptions: { minValue: 1 },
74
+ default: 1024,
75
+ description: 'Maximum number of tokens to generate',
76
+ },
77
+ {
78
+ displayName: 'Response Format',
79
+ name: 'response_format',
80
+ type: 'options',
81
+ options: [
82
+ { name: 'Text', value: 'text' },
83
+ { name: 'JSON Object', value: 'json_object' },
84
+ ],
85
+ default: 'text',
86
+ },
87
+ {
88
+ displayName: 'Temperature',
89
+ name: 'temperature',
90
+ type: 'number',
91
+ typeOptions: { minValue: 0, maxValue: 2, numberPrecision: 2 },
92
+ default: 1,
93
+ description: 'Controls randomness. Lower is more deterministic.',
94
+ },
95
+ {
96
+ displayName: 'Top P',
97
+ name: 'top_p',
98
+ type: 'number',
99
+ typeOptions: { minValue: 0, maxValue: 1, numberPrecision: 2 },
100
+ default: 1,
101
+ description: 'Nucleus sampling cutoff',
102
+ },
103
+ {
104
+ displayName: 'User ID',
105
+ name: 'user',
106
+ type: 'string',
107
+ default: '',
108
+ description: 'Unique identifier for tracking and abuse prevention',
109
+ },
110
+ ],
111
+ ...showForChat,
112
+ },
113
+ ];
114
+ async function executeChat(context, itemIndex) {
115
+ var _a, _b;
116
+ const credentials = await context.getCredentials('bergetAiApi');
117
+ const model = context.getNodeParameter('chatModel', itemIndex);
118
+ const messages = context.getNodeParameter('chatMessages.values', itemIndex, []);
119
+ const options = context.getNodeParameter('chatOptions', itemIndex, {});
120
+ const body = {
121
+ model,
122
+ messages,
123
+ ...options,
124
+ };
125
+ if (options.response_format === 'json_object') {
126
+ body.response_format = { type: 'json_object' };
127
+ }
128
+ else {
129
+ delete body.response_format;
130
+ }
131
+ const { status, data } = await (0, shared_1.bergetRequest)(credentials.apiKey, 'POST', '/chat/completions', body);
132
+ if (status !== 200) {
133
+ const message = (_b = (_a = data === null || data === void 0 ? void 0 : data.error) === null || _a === void 0 ? void 0 : _a.message) !== null && _b !== void 0 ? _b : `HTTP ${status}`;
134
+ throw new n8n_workflow_1.NodeOperationError(context.getNode(), `Berget AI chat error: ${message}`, {
135
+ itemIndex,
136
+ });
137
+ }
138
+ return data;
139
+ }
@@ -0,0 +1,3 @@
1
+ import type { IDataObject, IExecuteFunctions, INodeProperties } from 'n8n-workflow';
2
+ export declare const embeddingsProperties: INodeProperties[];
3
+ export declare function executeEmbeddings(context: IExecuteFunctions, itemIndex: number): Promise<IDataObject>;
@@ -0,0 +1,71 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.embeddingsProperties = void 0;
4
+ exports.executeEmbeddings = executeEmbeddings;
5
+ const n8n_workflow_1 = require("n8n-workflow");
6
+ const shared_1 = require("./shared");
7
+ const showForEmbeddings = {
8
+ displayOptions: {
9
+ show: {
10
+ resource: ['embeddings'],
11
+ },
12
+ },
13
+ };
14
+ exports.embeddingsProperties = [
15
+ {
16
+ displayName: 'Model',
17
+ name: 'embeddingsModel',
18
+ type: 'options',
19
+ typeOptions: { loadOptionsMethod: 'getEmbeddingsModels' },
20
+ default: '',
21
+ required: true,
22
+ description: 'The Berget AI embedding model to use',
23
+ ...showForEmbeddings,
24
+ },
25
+ {
26
+ displayName: 'Input Text',
27
+ name: 'embeddingsInput',
28
+ type: 'string',
29
+ typeOptions: { rows: 4 },
30
+ default: '',
31
+ required: true,
32
+ description: 'Text to convert into an embedding vector',
33
+ ...showForEmbeddings,
34
+ },
35
+ {
36
+ displayName: 'Options',
37
+ name: 'embeddingsOptions',
38
+ type: 'collection',
39
+ placeholder: 'Add Option',
40
+ default: {},
41
+ options: [
42
+ {
43
+ displayName: 'Encoding Format',
44
+ name: 'encoding_format',
45
+ type: 'options',
46
+ options: [
47
+ { name: 'Float', value: 'float' },
48
+ { name: 'Base64', value: 'base64' },
49
+ ],
50
+ default: 'float',
51
+ description: 'Format of the returned embedding data',
52
+ },
53
+ ],
54
+ ...showForEmbeddings,
55
+ },
56
+ ];
57
+ async function executeEmbeddings(context, itemIndex) {
58
+ var _a, _b;
59
+ const credentials = await context.getCredentials('bergetAiApi');
60
+ const model = context.getNodeParameter('embeddingsModel', itemIndex);
61
+ const input = context.getNodeParameter('embeddingsInput', itemIndex);
62
+ const options = context.getNodeParameter('embeddingsOptions', itemIndex, {});
63
+ const { status, data } = await (0, shared_1.bergetRequest)(credentials.apiKey, 'POST', '/embeddings', { model, input, ...options });
64
+ if (status !== 200) {
65
+ const message = (_b = (_a = data === null || data === void 0 ? void 0 : data.error) === null || _a === void 0 ? void 0 : _a.message) !== null && _b !== void 0 ? _b : `HTTP ${status}`;
66
+ throw new n8n_workflow_1.NodeOperationError(context.getNode(), `Berget AI embeddings error: ${message}`, {
67
+ itemIndex,
68
+ });
69
+ }
70
+ return data;
71
+ }
@@ -0,0 +1,3 @@
1
+ import type { IDataObject, IExecuteFunctions, INodeProperties } from 'n8n-workflow';
2
+ export declare const ocrProperties: INodeProperties[];
3
+ export declare function executeOcr(context: IExecuteFunctions, itemIndex: number): Promise<IDataObject>;
@@ -0,0 +1,196 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.ocrProperties = void 0;
4
+ exports.executeOcr = executeOcr;
5
+ const n8n_workflow_1 = require("n8n-workflow");
6
+ const shared_1 = require("./shared");
7
+ const showForOcr = {
8
+ displayOptions: {
9
+ show: {
10
+ resource: ['ocr'],
11
+ },
12
+ },
13
+ };
14
+ exports.ocrProperties = [
15
+ {
16
+ displayName: 'Document Type',
17
+ name: 'ocrDocumentType',
18
+ type: 'options',
19
+ options: [
20
+ { name: 'URL', value: 'url', description: 'Process a document from a URL' },
21
+ { name: 'Base64', value: 'base64', description: 'Process a base64-encoded document' },
22
+ ],
23
+ default: 'url',
24
+ description: 'How the document is provided',
25
+ ...showForOcr,
26
+ },
27
+ {
28
+ displayName: 'Document URL',
29
+ name: 'ocrDocumentUrl',
30
+ type: 'string',
31
+ default: '',
32
+ required: true,
33
+ description: 'URL of the document to process',
34
+ displayOptions: {
35
+ show: {
36
+ resource: ['ocr'],
37
+ ocrDocumentType: ['url'],
38
+ },
39
+ },
40
+ },
41
+ {
42
+ displayName: 'Document Data',
43
+ name: 'ocrDocumentData',
44
+ type: 'string',
45
+ typeOptions: { rows: 4 },
46
+ default: '',
47
+ required: true,
48
+ description: 'Base64-encoded document data',
49
+ displayOptions: {
50
+ show: {
51
+ resource: ['ocr'],
52
+ ocrDocumentType: ['base64'],
53
+ },
54
+ },
55
+ },
56
+ {
57
+ displayName: 'Processing Mode',
58
+ name: 'ocrAsync',
59
+ type: 'boolean',
60
+ default: false,
61
+ description: 'Whether to process the document asynchronously (recommended for large documents)',
62
+ ...showForOcr,
63
+ },
64
+ {
65
+ displayName: 'Options',
66
+ name: 'ocrOptions',
67
+ type: 'collection',
68
+ placeholder: 'Add Option',
69
+ default: {},
70
+ options: [
71
+ {
72
+ displayName: 'Output Format',
73
+ name: 'outputFormat',
74
+ type: 'options',
75
+ options: [
76
+ { name: 'Markdown', value: 'md' },
77
+ { name: 'JSON', value: 'json' },
78
+ ],
79
+ default: 'md',
80
+ description: 'Format for the extracted text',
81
+ },
82
+ {
83
+ displayName: 'Table Mode',
84
+ name: 'tableMode',
85
+ type: 'options',
86
+ options: [
87
+ { name: 'Accurate', value: 'accurate' },
88
+ { name: 'Fast', value: 'fast' },
89
+ ],
90
+ default: 'accurate',
91
+ description: 'Mode for table extraction',
92
+ },
93
+ {
94
+ displayName: 'OCR Method',
95
+ name: 'ocrMethod',
96
+ type: 'options',
97
+ options: [
98
+ { name: 'EasyOCR', value: 'easyocr' },
99
+ { name: 'Tesseract', value: 'tesseract' },
100
+ { name: 'OCR Mac', value: 'ocrmac' },
101
+ { name: 'RapidOCR', value: 'rapidocr' },
102
+ { name: 'TesserOCR', value: 'tesserocr' },
103
+ ],
104
+ default: 'easyocr',
105
+ description: 'OCR engine to use',
106
+ },
107
+ {
108
+ displayName: 'Perform OCR',
109
+ name: 'doOcr',
110
+ type: 'boolean',
111
+ default: true,
112
+ description: 'Whether to perform OCR on the document',
113
+ },
114
+ {
115
+ displayName: 'Extract Table Structure',
116
+ name: 'doTableStructure',
117
+ type: 'boolean',
118
+ default: true,
119
+ description: 'Whether to extract table structure',
120
+ },
121
+ {
122
+ displayName: 'Include Images',
123
+ name: 'includeImages',
124
+ type: 'boolean',
125
+ default: false,
126
+ description: 'Whether to include base64-encoded images in the output',
127
+ },
128
+ {
129
+ displayName: 'Input Formats',
130
+ name: 'inputFormat',
131
+ type: 'multiOptions',
132
+ options: [
133
+ { name: 'PDF', value: 'pdf' },
134
+ { name: 'HTML', value: 'html' },
135
+ { name: 'DOCX', value: 'docx' },
136
+ { name: 'PPTX', value: 'pptx' },
137
+ ],
138
+ default: ['pdf'],
139
+ description: 'Input formats to accept',
140
+ },
141
+ ],
142
+ ...showForOcr,
143
+ },
144
+ ];
145
+ async function executeOcr(context, itemIndex) {
146
+ var _a, _b, _c, _d, _e, _f, _g;
147
+ const credentials = await context.getCredentials('bergetAiApi');
148
+ const documentType = context.getNodeParameter('ocrDocumentType', itemIndex);
149
+ const asyncMode = context.getNodeParameter('ocrAsync', itemIndex);
150
+ const options = context.getNodeParameter('ocrOptions', itemIndex, {});
151
+ let documentUrl;
152
+ if (documentType === 'url') {
153
+ documentUrl = context.getNodeParameter('ocrDocumentUrl', itemIndex);
154
+ }
155
+ else {
156
+ const documentData = context.getNodeParameter('ocrDocumentData', itemIndex);
157
+ documentUrl = `data:application/pdf;base64,${documentData}`;
158
+ }
159
+ const body = {
160
+ document: { url: documentUrl, type: 'document' },
161
+ async: asyncMode,
162
+ options: {
163
+ outputFormat: (_a = options.outputFormat) !== null && _a !== void 0 ? _a : 'md',
164
+ tableMode: (_b = options.tableMode) !== null && _b !== void 0 ? _b : 'accurate',
165
+ ocrMethod: (_c = options.ocrMethod) !== null && _c !== void 0 ? _c : 'easyocr',
166
+ doOcr: options.doOcr !== false,
167
+ doTableStructure: options.doTableStructure !== false,
168
+ includeImages: (_d = options.includeImages) !== null && _d !== void 0 ? _d : false,
169
+ inputFormat: (_e = options.inputFormat) !== null && _e !== void 0 ? _e : ['pdf'],
170
+ },
171
+ };
172
+ const { status, data } = await (0, shared_1.bergetRequest)(credentials.apiKey, 'POST', '/ocr', body);
173
+ if (status === 200) {
174
+ const d = data;
175
+ return {
176
+ content: d.content,
177
+ usage: d.usage,
178
+ metadata: d.metadata,
179
+ processing_mode: 'synchronous',
180
+ };
181
+ }
182
+ if (status === 202) {
183
+ const d = data;
184
+ return {
185
+ taskId: d.taskId,
186
+ status: d.status,
187
+ resultUrl: d.resultUrl,
188
+ processing_mode: 'asynchronous',
189
+ message: 'Document processing started. Use the taskId to check status.',
190
+ };
191
+ }
192
+ const message = (_g = (_f = data === null || data === void 0 ? void 0 : data.error) === null || _f === void 0 ? void 0 : _f.message) !== null && _g !== void 0 ? _g : `HTTP ${status}`;
193
+ throw new n8n_workflow_1.NodeOperationError(context.getNode(), `Berget AI OCR error: ${message}`, {
194
+ itemIndex,
195
+ });
196
+ }
@@ -0,0 +1,3 @@
1
+ import type { IDataObject, IExecuteFunctions, INodeProperties } from 'n8n-workflow';
2
+ export declare const rerankProperties: INodeProperties[];
3
+ export declare function executeRerank(context: IExecuteFunctions, itemIndex: number): Promise<IDataObject>;