n8n-nodes-berget-mk 0.4.5 → 0.4.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/nodes/BergetAi/BergetAi.node.d.ts +1 -0
- package/dist/nodes/BergetAi/BergetAi.node.js +13 -0
- package/dist/nodes/BergetAi/image.d.ts +3 -0
- package/dist/nodes/BergetAi/image.js +168 -0
- package/dist/nodes/BergetAi/shared.d.ts +6 -0
- package/dist/nodes/BergetAiEmbeddingsModel/BergetAiEmbeddingsModel.node.js +9 -0
- package/package.json +1 -1
|
@@ -4,6 +4,7 @@ export declare class BergetAi implements INodeType {
|
|
|
4
4
|
methods: {
|
|
5
5
|
loadOptions: {
|
|
6
6
|
getChatModels(this: ILoadOptionsFunctions): Promise<INodePropertyOptions[]>;
|
|
7
|
+
getVisionModels(this: ILoadOptionsFunctions): Promise<INodePropertyOptions[]>;
|
|
7
8
|
getRerankModels(this: ILoadOptionsFunctions): Promise<INodePropertyOptions[]>;
|
|
8
9
|
getSpeechModels(this: ILoadOptionsFunctions): Promise<INodePropertyOptions[]>;
|
|
9
10
|
};
|
|
@@ -4,6 +4,7 @@ exports.BergetAi = void 0;
|
|
|
4
4
|
const n8n_workflow_1 = require("n8n-workflow");
|
|
5
5
|
const n8n_workflow_2 = require("n8n-workflow");
|
|
6
6
|
const chat_1 = require("./chat");
|
|
7
|
+
const image_1 = require("./image");
|
|
7
8
|
// OCR temporarily disabled — see the block comment below the BergetAi class
|
|
8
9
|
// header for the re-enable procedure.
|
|
9
10
|
// import { executeOcr, ocrProperties } from './ocr';
|
|
@@ -71,6 +72,11 @@ class BergetAi {
|
|
|
71
72
|
value: 'chat',
|
|
72
73
|
description: 'Create a chat completion',
|
|
73
74
|
},
|
|
75
|
+
{
|
|
76
|
+
name: 'Image Analysis',
|
|
77
|
+
value: 'image',
|
|
78
|
+
description: 'Ask a vision-capable model about an image',
|
|
79
|
+
},
|
|
74
80
|
// OCR: uncomment this block to re-enable the OCR resource.
|
|
75
81
|
// {
|
|
76
82
|
// name: 'OCR',
|
|
@@ -90,6 +96,7 @@ class BergetAi {
|
|
|
90
96
|
],
|
|
91
97
|
},
|
|
92
98
|
...chat_1.chatProperties,
|
|
99
|
+
...image_1.imageProperties,
|
|
93
100
|
// OCR: uncomment to re-enable the OCR resource properties.
|
|
94
101
|
// ...ocrProperties,
|
|
95
102
|
...rerank_1.rerankProperties,
|
|
@@ -101,6 +108,9 @@ class BergetAi {
|
|
|
101
108
|
async getChatModels() {
|
|
102
109
|
return (0, shared_1.loadModelOptions)(this, (m) => m.model_type === 'text' || m.model_type === 'ocr');
|
|
103
110
|
},
|
|
111
|
+
async getVisionModels() {
|
|
112
|
+
return (0, shared_1.loadModelOptions)(this, (m) => { var _a; return m.model_type === 'text' && ((_a = m.capabilities) === null || _a === void 0 ? void 0 : _a.vision) === true; });
|
|
113
|
+
},
|
|
104
114
|
async getRerankModels() {
|
|
105
115
|
return (0, shared_1.loadModelOptions)(this, (m) => m.model_type === 'rerank');
|
|
106
116
|
},
|
|
@@ -121,6 +131,9 @@ class BergetAi {
|
|
|
121
131
|
case 'chat':
|
|
122
132
|
result = await (0, chat_1.executeChat)(this, i);
|
|
123
133
|
break;
|
|
134
|
+
case 'image':
|
|
135
|
+
result = await (0, image_1.executeImage)(this, i);
|
|
136
|
+
break;
|
|
124
137
|
// OCR: uncomment to re-enable the OCR execute branch.
|
|
125
138
|
// case 'ocr':
|
|
126
139
|
// result = await executeOcr(this, i);
|
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.imageProperties = void 0;
|
|
4
|
+
exports.executeImage = executeImage;
|
|
5
|
+
const n8n_workflow_1 = require("n8n-workflow");
|
|
6
|
+
const shared_1 = require("./shared");
|
|
7
|
+
const showForImage = {
|
|
8
|
+
displayOptions: {
|
|
9
|
+
show: {
|
|
10
|
+
resource: ['image'],
|
|
11
|
+
},
|
|
12
|
+
},
|
|
13
|
+
};
|
|
14
|
+
exports.imageProperties = [
|
|
15
|
+
{
|
|
16
|
+
displayName: 'Model',
|
|
17
|
+
name: 'imageModel',
|
|
18
|
+
type: 'options',
|
|
19
|
+
typeOptions: { loadOptionsMethod: 'getVisionModels' },
|
|
20
|
+
default: '',
|
|
21
|
+
required: true,
|
|
22
|
+
description: 'The Berget AI vision-capable chat model to use. Only models with capabilities.vision === true are shown.',
|
|
23
|
+
...showForImage,
|
|
24
|
+
},
|
|
25
|
+
{
|
|
26
|
+
displayName: 'Text Input',
|
|
27
|
+
name: 'imageText',
|
|
28
|
+
type: 'string',
|
|
29
|
+
typeOptions: { rows: 3 },
|
|
30
|
+
default: "What's in this image?",
|
|
31
|
+
description: 'The prompt sent alongside the image (e.g. "Describe the scene" or "Extract any visible text")',
|
|
32
|
+
...showForImage,
|
|
33
|
+
},
|
|
34
|
+
{
|
|
35
|
+
displayName: 'Input Type',
|
|
36
|
+
name: 'imageInputType',
|
|
37
|
+
type: 'options',
|
|
38
|
+
options: [
|
|
39
|
+
{
|
|
40
|
+
name: 'Binary File',
|
|
41
|
+
value: 'binary',
|
|
42
|
+
description: 'Read an image from the incoming item\'s binary data (most n8n workflows)',
|
|
43
|
+
},
|
|
44
|
+
{
|
|
45
|
+
name: 'Image URL',
|
|
46
|
+
value: 'url',
|
|
47
|
+
description: 'Fetch an image from a public URL',
|
|
48
|
+
},
|
|
49
|
+
],
|
|
50
|
+
default: 'binary',
|
|
51
|
+
description: 'How the image is provided',
|
|
52
|
+
...showForImage,
|
|
53
|
+
},
|
|
54
|
+
{
|
|
55
|
+
displayName: 'Input Data Field Name',
|
|
56
|
+
name: 'imageBinaryProperty',
|
|
57
|
+
type: 'string',
|
|
58
|
+
default: 'data',
|
|
59
|
+
required: true,
|
|
60
|
+
description: 'Name of the binary property on the incoming item that holds the image. Default is "data". When using a Form Trigger, set this to the form field name (e.g. "Image").',
|
|
61
|
+
displayOptions: {
|
|
62
|
+
show: {
|
|
63
|
+
resource: ['image'],
|
|
64
|
+
imageInputType: ['binary'],
|
|
65
|
+
},
|
|
66
|
+
},
|
|
67
|
+
},
|
|
68
|
+
{
|
|
69
|
+
displayName: 'Image URL',
|
|
70
|
+
name: 'imageUrl',
|
|
71
|
+
type: 'string',
|
|
72
|
+
default: '',
|
|
73
|
+
required: true,
|
|
74
|
+
description: 'Public URL of the image to analyze. Must be reachable from Berget AI\'s servers.',
|
|
75
|
+
displayOptions: {
|
|
76
|
+
show: {
|
|
77
|
+
resource: ['image'],
|
|
78
|
+
imageInputType: ['url'],
|
|
79
|
+
},
|
|
80
|
+
},
|
|
81
|
+
},
|
|
82
|
+
{
|
|
83
|
+
displayName: 'Options',
|
|
84
|
+
name: 'imageOptions',
|
|
85
|
+
type: 'collection',
|
|
86
|
+
placeholder: 'Add Option',
|
|
87
|
+
default: {},
|
|
88
|
+
options: [
|
|
89
|
+
{
|
|
90
|
+
displayName: 'Detail Level',
|
|
91
|
+
name: 'detail',
|
|
92
|
+
type: 'options',
|
|
93
|
+
options: [
|
|
94
|
+
{ name: 'Auto', value: 'auto' },
|
|
95
|
+
{ name: 'Low', value: 'low' },
|
|
96
|
+
{ name: 'High', value: 'high' },
|
|
97
|
+
],
|
|
98
|
+
default: 'auto',
|
|
99
|
+
description: "How carefully the model should analyze the image. \"High\" gives more detailed analysis but costs more tokens; \"low\" is cheaper and faster for simple tasks.",
|
|
100
|
+
},
|
|
101
|
+
{
|
|
102
|
+
displayName: 'Max Tokens',
|
|
103
|
+
name: 'max_tokens',
|
|
104
|
+
type: 'number',
|
|
105
|
+
typeOptions: { minValue: 1 },
|
|
106
|
+
default: 1024,
|
|
107
|
+
description: 'Maximum number of tokens to generate in the response',
|
|
108
|
+
},
|
|
109
|
+
{
|
|
110
|
+
displayName: 'Temperature',
|
|
111
|
+
name: 'temperature',
|
|
112
|
+
type: 'number',
|
|
113
|
+
typeOptions: { minValue: 0, maxValue: 2, numberPrecision: 2 },
|
|
114
|
+
default: 0.7,
|
|
115
|
+
description: 'Controls randomness. Lower is more deterministic.',
|
|
116
|
+
},
|
|
117
|
+
],
|
|
118
|
+
...showForImage,
|
|
119
|
+
},
|
|
120
|
+
];
|
|
121
|
+
async function executeImage(context, itemIndex) {
|
|
122
|
+
var _a, _b;
|
|
123
|
+
const credentials = await context.getCredentials('bergetAiApi');
|
|
124
|
+
const model = context.getNodeParameter('imageModel', itemIndex);
|
|
125
|
+
const text = context.getNodeParameter('imageText', itemIndex);
|
|
126
|
+
const inputType = context.getNodeParameter('imageInputType', itemIndex);
|
|
127
|
+
const options = context.getNodeParameter('imageOptions', itemIndex, {});
|
|
128
|
+
let imageUrlValue;
|
|
129
|
+
if (inputType === 'binary') {
|
|
130
|
+
const binaryPropertyName = context.getNodeParameter('imageBinaryProperty', itemIndex);
|
|
131
|
+
const binaryData = context.helpers.assertBinaryData(itemIndex, binaryPropertyName);
|
|
132
|
+
const buffer = await context.helpers.getBinaryDataBuffer(itemIndex, binaryPropertyName);
|
|
133
|
+
const mimeType = (_a = binaryData.mimeType) !== null && _a !== void 0 ? _a : 'image/png';
|
|
134
|
+
imageUrlValue = `data:${mimeType};base64,${buffer.toString('base64')}`;
|
|
135
|
+
}
|
|
136
|
+
else {
|
|
137
|
+
imageUrlValue = context.getNodeParameter('imageUrl', itemIndex);
|
|
138
|
+
if (!imageUrlValue) {
|
|
139
|
+
throw new n8n_workflow_1.NodeOperationError(context.getNode(), 'Berget AI image: Image URL is empty', { itemIndex });
|
|
140
|
+
}
|
|
141
|
+
}
|
|
142
|
+
const userMessage = {
|
|
143
|
+
role: 'user',
|
|
144
|
+
content: [
|
|
145
|
+
{ type: 'text', text },
|
|
146
|
+
{
|
|
147
|
+
type: 'image_url',
|
|
148
|
+
image_url: {
|
|
149
|
+
url: imageUrlValue,
|
|
150
|
+
detail: (_b = options.detail) !== null && _b !== void 0 ? _b : 'auto',
|
|
151
|
+
},
|
|
152
|
+
},
|
|
153
|
+
],
|
|
154
|
+
};
|
|
155
|
+
const body = {
|
|
156
|
+
model,
|
|
157
|
+
messages: [userMessage],
|
|
158
|
+
};
|
|
159
|
+
if (options.max_tokens !== undefined)
|
|
160
|
+
body.max_tokens = options.max_tokens;
|
|
161
|
+
if (options.temperature !== undefined)
|
|
162
|
+
body.temperature = options.temperature;
|
|
163
|
+
const { status, data } = await (0, shared_1.bergetRequest)(credentials.apiKey, 'POST', '/chat/completions', body);
|
|
164
|
+
if (status !== 200) {
|
|
165
|
+
throw new n8n_workflow_1.NodeOperationError(context.getNode(), (0, shared_1.formatBergetError)('image analysis', status, data), { itemIndex });
|
|
166
|
+
}
|
|
167
|
+
return data;
|
|
168
|
+
}
|
|
@@ -7,6 +7,12 @@ export interface BergetModel {
|
|
|
7
7
|
owned_by?: string;
|
|
8
8
|
capabilities?: {
|
|
9
9
|
function_calling?: boolean;
|
|
10
|
+
vision?: boolean;
|
|
11
|
+
json_mode?: boolean;
|
|
12
|
+
classification?: boolean;
|
|
13
|
+
embeddings?: boolean;
|
|
14
|
+
formatted_output?: boolean;
|
|
15
|
+
streaming?: boolean;
|
|
10
16
|
};
|
|
11
17
|
}
|
|
12
18
|
export declare function fetchBergetModels(context: ILoadOptionsFunctions): Promise<BergetModel[]>;
|
|
@@ -59,6 +59,14 @@ class BergetAiEmbeddingsModel {
|
|
|
59
59
|
default: 512,
|
|
60
60
|
description: 'Number of documents to embed per API request',
|
|
61
61
|
},
|
|
62
|
+
{
|
|
63
|
+
displayName: 'Dimensions',
|
|
64
|
+
name: 'dimensions',
|
|
65
|
+
type: 'number',
|
|
66
|
+
typeOptions: { minValue: 1 },
|
|
67
|
+
default: 1024,
|
|
68
|
+
description: "The number of dimensions the resulting embedding vectors should have. When omitted or left at the model's native size, the full embedding is returned. Berget's default embedding model intfloat/multilingual-e5-large-instruct produces 1024-dimensional vectors natively. Lower values produce smaller vectors at some cost to retrieval quality — useful when storing many embeddings or when your vector store has a fixed dimension. Must match the dimension your Vector Store is configured for, or indexing will fail.",
|
|
69
|
+
},
|
|
62
70
|
{
|
|
63
71
|
displayName: 'Strip New Lines',
|
|
64
72
|
name: 'stripNewLines',
|
|
@@ -115,6 +123,7 @@ class BergetAiEmbeddingsModel {
|
|
|
115
123
|
batchSize: (_a = options.batchSize) !== null && _a !== void 0 ? _a : 512,
|
|
116
124
|
stripNewLines: (_b = options.stripNewLines) !== null && _b !== void 0 ? _b : true,
|
|
117
125
|
timeout: (_c = options.timeout) !== null && _c !== void 0 ? _c : 60000,
|
|
126
|
+
...(options.dimensions ? { dimensions: options.dimensions } : {}),
|
|
118
127
|
});
|
|
119
128
|
return { response: embeddings };
|
|
120
129
|
}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "n8n-nodes-berget-mk",
|
|
3
|
-
"version": "0.4.5",
|
|
3
|
+
"version": "0.4.7",
|
|
4
4
|
"description": "n8n community node for Berget AI. Multi-resource action node (chat, OCR, rerank, speech-to-text) plus Chat Model and Embeddings Model sub-nodes that plug into n8n's built-in AI Agent and Vector Store nodes.",
|
|
5
5
|
"keywords": [
|
|
6
6
|
"n8n-community-node-package",
|