@n8n/n8n-nodes-langchain 1.114.1 → 1.115.1

This diff reflects the changes between publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (46)
  1. package/dist/credentials/AzureEntraCognitiveServicesOAuth2Api.credentials.js +1 -1
  2. package/dist/credentials/AzureEntraCognitiveServicesOAuth2Api.credentials.js.map +1 -1
  3. package/dist/known/credentials.json +1 -0
  4. package/dist/known/nodes.json +8 -0
  5. package/dist/nodes/vector_store/VectorStoreRedis/VectorStoreRedis.node.js +339 -0
  6. package/dist/nodes/vector_store/VectorStoreRedis/VectorStoreRedis.node.js.map +1 -0
  7. package/dist/nodes/vector_store/VectorStoreRedis/redis.dark.svg +37 -0
  8. package/dist/nodes/vector_store/VectorStoreRedis/redis.svg +37 -0
  9. package/dist/nodes/vector_store/shared/createVectorStoreNode/createVectorStoreNode.js +15 -1
  10. package/dist/nodes/vector_store/shared/createVectorStoreNode/createVectorStoreNode.js.map +1 -1
  11. package/dist/nodes/vector_store/shared/createVectorStoreNode/operations/index.js +3 -1
  12. package/dist/nodes/vector_store/shared/createVectorStoreNode/operations/index.js.map +1 -1
  13. package/dist/nodes/vector_store/shared/createVectorStoreNode/operations/retrieveAsToolExecuteOperation.js +98 -0
  14. package/dist/nodes/vector_store/shared/createVectorStoreNode/operations/retrieveAsToolExecuteOperation.js.map +1 -0
  15. package/dist/nodes/vendors/Ollama/Ollama.node.js +42 -0
  16. package/dist/nodes/vendors/Ollama/Ollama.node.js.map +1 -0
  17. package/dist/nodes/vendors/Ollama/actions/descriptions.js +52 -0
  18. package/dist/nodes/vendors/Ollama/actions/descriptions.js.map +1 -0
  19. package/dist/nodes/vendors/Ollama/actions/image/analyze.operation.js +412 -0
  20. package/dist/nodes/vendors/Ollama/actions/image/analyze.operation.js.map +1 -0
  21. package/dist/nodes/vendors/Ollama/actions/image/index.js +64 -0
  22. package/dist/nodes/vendors/Ollama/actions/image/index.js.map +1 -0
  23. package/dist/nodes/vendors/Ollama/actions/node.type.js +17 -0
  24. package/dist/nodes/vendors/Ollama/actions/node.type.js.map +1 -0
  25. package/dist/nodes/vendors/Ollama/actions/router.js +78 -0
  26. package/dist/nodes/vendors/Ollama/actions/router.js.map +1 -0
  27. package/dist/nodes/vendors/Ollama/actions/text/index.js +64 -0
  28. package/dist/nodes/vendors/Ollama/actions/text/index.js.map +1 -0
  29. package/dist/nodes/vendors/Ollama/actions/text/message.operation.js +440 -0
  30. package/dist/nodes/vendors/Ollama/actions/text/message.operation.js.map +1 -0
  31. package/dist/nodes/vendors/Ollama/actions/versionDescription.js +107 -0
  32. package/dist/nodes/vendors/Ollama/actions/versionDescription.js.map +1 -0
  33. package/dist/nodes/vendors/Ollama/helpers/index.js +17 -0
  34. package/dist/nodes/vendors/Ollama/helpers/index.js.map +1 -0
  35. package/dist/nodes/vendors/Ollama/helpers/interfaces.js +17 -0
  36. package/dist/nodes/vendors/Ollama/helpers/interfaces.js.map +1 -0
  37. package/dist/nodes/vendors/Ollama/methods/index.js +39 -0
  38. package/dist/nodes/vendors/Ollama/methods/index.js.map +1 -0
  39. package/dist/nodes/vendors/Ollama/methods/listSearch.js +39 -0
  40. package/dist/nodes/vendors/Ollama/methods/listSearch.js.map +1 -0
  41. package/dist/nodes/vendors/Ollama/ollama.svg +1 -0
  42. package/dist/nodes/vendors/Ollama/transport/index.js +56 -0
  43. package/dist/nodes/vendors/Ollama/transport/index.js.map +1 -0
  44. package/dist/types/credentials.json +2 -2
  45. package/dist/types/nodes.json +2 -0
  46. package/package.json +11 -8
package/dist/nodes/vendors/Ollama/actions/text/message.operation.js
@@ -0,0 +1,440 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __export = (target, all) => {
7
+ for (var name in all)
8
+ __defProp(target, name, { get: all[name], enumerable: true });
9
+ };
10
+ var __copyProps = (to, from, except, desc) => {
11
+ if (from && typeof from === "object" || typeof from === "function") {
12
+ for (let key of __getOwnPropNames(from))
13
+ if (!__hasOwnProp.call(to, key) && key !== except)
14
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
15
+ }
16
+ return to;
17
+ };
18
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+ var message_operation_exports = {};
20
+ __export(message_operation_exports, {
21
+ description: () => description,
22
+ execute: () => execute
23
+ });
24
+ module.exports = __toCommonJS(message_operation_exports);
25
+ var import_n8n_workflow = require("n8n-workflow");
26
+ var import_zod_to_json_schema = require("zod-to-json-schema");
27
+ var import_helpers = require("../../../../../utils/helpers");
28
+ var import_transport = require("../../transport");
29
+ var import_descriptions = require("../descriptions");
30
+ const properties = [
31
+ import_descriptions.modelRLC,
32
+ {
33
+ displayName: "Messages",
34
+ name: "messages",
35
+ type: "fixedCollection",
36
+ typeOptions: {
37
+ sortable: true,
38
+ multipleValues: true
39
+ },
40
+ placeholder: "Add Message",
41
+ default: { values: [{ content: "", role: "user" }] },
42
+ options: [
43
+ {
44
+ displayName: "Values",
45
+ name: "values",
46
+ values: [
47
+ {
48
+ displayName: "Content",
49
+ name: "content",
50
+ type: "string",
51
+ description: "The content of the message to be sent",
52
+ default: "",
53
+ placeholder: "e.g. Hello, how can you help me?",
54
+ typeOptions: {
55
+ rows: 2
56
+ }
57
+ },
58
+ {
59
+ displayName: "Role",
60
+ name: "role",
61
+ type: "options",
62
+ description: "The role of this message in the conversation",
63
+ options: [
64
+ {
65
+ name: "User",
66
+ value: "user",
67
+ description: "Message from the user"
68
+ },
69
+ {
70
+ name: "Assistant",
71
+ value: "assistant",
72
+ description: "Response from the assistant (for conversation history)"
73
+ }
74
+ ],
75
+ default: "user"
76
+ }
77
+ ]
78
+ }
79
+ ]
80
+ },
81
+ {
82
+ displayName: "Simplify Output",
83
+ name: "simplify",
84
+ type: "boolean",
85
+ default: true,
86
+ description: "Whether to simplify the response or not"
87
+ },
88
+ {
89
+ displayName: "Options",
90
+ name: "options",
91
+ placeholder: "Add Option",
92
+ type: "collection",
93
+ default: {},
94
+ options: [
95
+ {
96
+ displayName: "System Message",
97
+ name: "system",
98
+ type: "string",
99
+ default: "",
100
+ placeholder: "e.g. You are a helpful assistant.",
101
+ description: "System message to set the context for the conversation",
102
+ typeOptions: {
103
+ rows: 2
104
+ }
105
+ },
106
+ {
107
+ displayName: "Temperature",
108
+ name: "temperature",
109
+ type: "number",
110
+ default: 0.8,
111
+ typeOptions: {
112
+ minValue: 0,
113
+ maxValue: 2,
114
+ numberPrecision: 2
115
+ },
116
+ description: "Controls randomness in responses. Lower values make output more focused."
117
+ },
118
+ {
119
+ displayName: "Output Randomness (Top P)",
120
+ name: "top_p",
121
+ default: 0.7,
122
+ description: "The maximum cumulative probability of tokens to consider when sampling",
123
+ type: "number",
124
+ typeOptions: {
125
+ minValue: 0,
126
+ maxValue: 1,
127
+ numberPrecision: 1
128
+ }
129
+ },
130
+ {
131
+ displayName: "Top K",
132
+ name: "top_k",
133
+ type: "number",
134
+ default: 40,
135
+ typeOptions: {
136
+ minValue: 1
137
+ },
138
+ description: "Controls diversity by limiting the number of top tokens to consider"
139
+ },
140
+ {
141
+ displayName: "Max Tokens",
142
+ name: "num_predict",
143
+ type: "number",
144
+ default: 1024,
145
+ typeOptions: {
146
+ minValue: 1,
147
+ numberPrecision: 0
148
+ },
149
+ description: "Maximum number of tokens to generate in the completion"
150
+ },
151
+ {
152
+ displayName: "Frequency Penalty",
153
+ name: "frequency_penalty",
154
+ type: "number",
155
+ default: 0,
156
+ typeOptions: {
157
+ minValue: 0,
158
+ numberPrecision: 2
159
+ },
160
+ description: "Adjusts the penalty for tokens that have already appeared in the generated text. Higher values discourage repetition."
161
+ },
162
+ {
163
+ displayName: "Presence Penalty",
164
+ name: "presence_penalty",
165
+ type: "number",
166
+ default: 0,
167
+ typeOptions: {
168
+ numberPrecision: 2
169
+ },
170
+ description: "Adjusts the penalty for tokens based on their presence in the generated text so far. Positive values penalize tokens that have already appeared, encouraging diversity."
171
+ },
172
+ {
173
+ displayName: "Repetition Penalty",
174
+ name: "repeat_penalty",
175
+ type: "number",
176
+ default: 1.1,
177
+ typeOptions: {
178
+ minValue: 0,
179
+ numberPrecision: 2
180
+ },
181
+ description: "Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient."
182
+ },
183
+ {
184
+ displayName: "Context Length",
185
+ name: "num_ctx",
186
+ type: "number",
187
+ default: 4096,
188
+ typeOptions: {
189
+ minValue: 1,
190
+ numberPrecision: 0
191
+ },
192
+ description: "Sets the size of the context window used to generate the next token"
193
+ },
194
+ {
195
+ displayName: "Repeat Last N",
196
+ name: "repeat_last_n",
197
+ type: "number",
198
+ default: 64,
199
+ typeOptions: {
200
+ minValue: -1,
201
+ numberPrecision: 0
202
+ },
203
+ description: "Sets how far back for the model to look back to prevent repetition. (0 = disabled, -1 = num_ctx)."
204
+ },
205
+ {
206
+ displayName: "Min P",
207
+ name: "min_p",
208
+ type: "number",
209
+ default: 0,
210
+ typeOptions: {
211
+ minValue: 0,
212
+ maxValue: 1,
213
+ numberPrecision: 3
214
+ },
215
+ description: "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token."
216
+ },
217
+ {
218
+ displayName: "Seed",
219
+ name: "seed",
220
+ type: "number",
221
+ default: 0,
222
+ typeOptions: {
223
+ minValue: 0,
224
+ numberPrecision: 0
225
+ },
226
+ description: "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt."
227
+ },
228
+ {
229
+ displayName: "Stop Sequences",
230
+ name: "stop",
231
+ type: "string",
232
+ default: "",
233
+ description: "Sets the stop sequences to use. When this pattern is encountered the LLM will stop generating text and return. Separate multiple patterns with commas"
234
+ },
235
+ {
236
+ displayName: "Keep Alive",
237
+ name: "keep_alive",
238
+ type: "string",
239
+ default: "5m",
240
+ description: "Specifies the duration to keep the loaded model in memory after use. Format: 1h30m (1 hour 30 minutes)."
241
+ },
242
+ {
243
+ displayName: "Low VRAM Mode",
244
+ name: "low_vram",
245
+ type: "boolean",
246
+ default: false,
247
+ description: "Whether to activate low VRAM mode, which reduces memory usage at the cost of slower generation speed. Useful for GPUs with limited memory."
248
+ },
249
+ {
250
+ displayName: "Main GPU ID",
251
+ name: "main_gpu",
252
+ type: "number",
253
+ default: 0,
254
+ typeOptions: {
255
+ minValue: 0,
256
+ numberPrecision: 0
257
+ },
258
+ description: "Specifies the ID of the GPU to use for the main computation. Only change this if you have multiple GPUs."
259
+ },
260
+ {
261
+ displayName: "Context Batch Size",
262
+ name: "num_batch",
263
+ type: "number",
264
+ default: 512,
265
+ typeOptions: {
266
+ minValue: 1,
267
+ numberPrecision: 0
268
+ },
269
+ description: "Sets the batch size for prompt processing. Larger batch sizes may improve generation speed but increase memory usage."
270
+ },
271
+ {
272
+ displayName: "Number of GPUs",
273
+ name: "num_gpu",
274
+ type: "number",
275
+ default: -1,
276
+ typeOptions: {
277
+ minValue: -1,
278
+ numberPrecision: 0
279
+ },
280
+ description: "Specifies the number of GPUs to use for parallel processing. Set to -1 for auto-detection."
281
+ },
282
+ {
283
+ displayName: "Number of CPU Threads",
284
+ name: "num_thread",
285
+ type: "number",
286
+ default: 0,
287
+ typeOptions: {
288
+ minValue: 0,
289
+ numberPrecision: 0
290
+ },
291
+ description: "Specifies the number of CPU threads to use for processing. Set to 0 for auto-detection."
292
+ },
293
+ {
294
+ displayName: "Penalize Newlines",
295
+ name: "penalize_newline",
296
+ type: "boolean",
297
+ default: true,
298
+ description: "Whether the model will be less likely to generate newline characters, encouraging longer continuous sequences of text"
299
+ },
300
+ {
301
+ displayName: "Use Memory Locking",
302
+ name: "use_mlock",
303
+ type: "boolean",
304
+ default: false,
305
+ description: "Whether to lock the model in memory to prevent swapping. This can improve performance but requires sufficient available memory."
306
+ },
307
+ {
308
+ displayName: "Use Memory Mapping",
309
+ name: "use_mmap",
310
+ type: "boolean",
311
+ default: true,
312
+ description: "Whether to use memory mapping for loading the model. This can reduce memory usage but may impact performance."
313
+ },
314
+ {
315
+ displayName: "Load Vocabulary Only",
316
+ name: "vocab_only",
317
+ type: "boolean",
318
+ default: false,
319
+ description: "Whether to only load the model vocabulary without the weights. Useful for quickly testing tokenization."
320
+ },
321
+ {
322
+ displayName: "Output Format",
323
+ name: "format",
324
+ type: "options",
325
+ options: [
326
+ { name: "Default", value: "" },
327
+ { name: "JSON", value: "json" }
328
+ ],
329
+ default: "",
330
+ description: "Specifies the format of the API response"
331
+ }
332
+ ]
333
+ }
334
+ ];
335
+ const displayOptions = {
336
+ show: {
337
+ operation: ["message"],
338
+ resource: ["text"]
339
+ }
340
+ };
341
+ const description = (0, import_n8n_workflow.updateDisplayOptions)(displayOptions, properties);
342
+ async function execute(i) {
343
+ const model = this.getNodeParameter("modelId", i, "", { extractValue: true });
344
+ const messages = this.getNodeParameter("messages.values", i, []);
345
+ const simplify = this.getNodeParameter("simplify", i, true);
346
+ const options = this.getNodeParameter("options", i, {});
347
+ const { tools, connectedTools } = await getTools.call(this);
348
+ if (options.system) {
349
+ messages.unshift({
350
+ role: "system",
351
+ content: options.system
352
+ });
353
+ }
354
+ delete options.system;
355
+ const processedOptions = { ...options };
356
+ if (processedOptions.stop && typeof processedOptions.stop === "string") {
357
+ processedOptions.stop = processedOptions.stop.split(",").map((s) => s.trim()).filter(Boolean);
358
+ }
359
+ const body = {
360
+ model,
361
+ messages,
362
+ stream: false,
363
+ tools,
364
+ options: processedOptions
365
+ };
366
+ let response = await import_transport.apiRequest.call(this, "POST", "/api/chat", {
367
+ body
368
+ });
369
+ if (tools.length > 0 && response.message.tool_calls && response.message.tool_calls.length > 0) {
370
+ const toolCalls = response.message.tool_calls;
371
+ messages.push(response.message);
372
+ for (const toolCall of toolCalls) {
373
+ let toolResponse = "";
374
+ let toolFound = false;
375
+ for (const tool of connectedTools) {
376
+ if (tool.name === toolCall.function.name) {
377
+ toolFound = true;
378
+ try {
379
+ const result = await tool.invoke(toolCall.function.arguments);
380
+ toolResponse = typeof result === "object" && result !== null ? JSON.stringify(result) : String(result);
381
+ } catch (error) {
382
+ toolResponse = `Error executing tool: ${error instanceof Error ? error.message : "Unknown error"}`;
383
+ }
384
+ break;
385
+ }
386
+ }
387
+ if (!toolFound) {
388
+ toolResponse = `Error: Tool '${toolCall.function.name}' not found`;
389
+ }
390
+ messages.push({
391
+ role: "tool",
392
+ content: toolResponse,
393
+ tool_name: toolCall.function.name
394
+ });
395
+ }
396
+ const updatedBody = {
397
+ ...body,
398
+ messages
399
+ };
400
+ response = await import_transport.apiRequest.call(this, "POST", "/api/chat", {
401
+ body: updatedBody
402
+ });
403
+ }
404
+ if (simplify) {
405
+ return [
406
+ {
407
+ json: { content: response.message.content },
408
+ pairedItem: { item: i }
409
+ }
410
+ ];
411
+ }
412
+ return [
413
+ {
414
+ json: { ...response },
415
+ pairedItem: { item: i }
416
+ }
417
+ ];
418
+ }
419
+ async function getTools() {
420
+ let connectedTools = [];
421
+ const nodeInputs = this.getNodeInputs();
422
+ if (nodeInputs.some((input) => input.type === "ai_tool")) {
423
+ connectedTools = await (0, import_helpers.getConnectedTools)(this, true);
424
+ }
425
+ const tools = connectedTools.map((tool) => ({
426
+ type: "function",
427
+ function: {
428
+ name: tool.name,
429
+ description: tool.description,
430
+ parameters: (0, import_zod_to_json_schema.zodToJsonSchema)(tool.schema)
431
+ }
432
+ }));
433
+ return { tools, connectedTools };
434
+ }
435
+ // Annotate the CommonJS export names for ESM import in node:
436
+ 0 && (module.exports = {
437
+ description,
438
+ execute
439
+ });
440
+ //# sourceMappingURL=message.operation.js.map
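Note on the new text → message operation above: it posts a single non-streaming request to Ollama's /api/chat endpoint with the model, the collected messages, any connected tools, and the chosen options; if the response contains tool_calls, each matching connected tool is invoked, its result is appended as a role "tool" message, and the chat request is repeated once. Below is a minimal sketch of that request/response shape written against a plain local Ollama server rather than n8n's apiRequest helper; the base URL and model name are assumptions, not part of the package.

// Sketch only (not the node's code): mirrors the body built in execute()
// and sends it to a local Ollama instance. Base URL and model are assumptions.
interface ChatMessage {
  role: 'system' | 'user' | 'assistant' | 'tool';
  content: string;
}

async function chatOnce(model: string, messages: ChatMessage[]): Promise<string> {
  const body = {
    model,
    messages,
    stream: false, // the operation always requests a non-streaming response
    options: { temperature: 0.8, num_predict: 1024 }, // same defaults as the node's Options collection
  };
  const res = await fetch('http://localhost:11434/api/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(body),
  });
  const data = (await res.json()) as { message: { content: string } };
  // With "Simplify Output" enabled the node returns only message.content;
  // otherwise it returns the full response object.
  return data.message.content;
}

// Example: await chatOnce('llama3.2', [{ role: 'user', content: 'Hello, how can you help me?' }]);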
package/dist/nodes/vendors/Ollama/actions/text/message.operation.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../../../../../../nodes/vendors/Ollama/actions/text/message.operation.ts"],"sourcesContent":["import type { Tool } from '@langchain/core/tools';\nimport type { IExecuteFunctions, INodeExecutionData, INodeProperties } from 'n8n-workflow';\nimport { updateDisplayOptions } from 'n8n-workflow';\nimport { zodToJsonSchema } from 'zod-to-json-schema';\n\nimport { getConnectedTools } from '@utils/helpers';\n\nimport type { OllamaChatResponse, OllamaMessage, OllamaTool } from '../../helpers';\nimport { apiRequest } from '../../transport';\nimport { modelRLC } from '../descriptions';\n\nconst properties: INodeProperties[] = [\n\tmodelRLC,\n\t{\n\t\tdisplayName: 'Messages',\n\t\tname: 'messages',\n\t\ttype: 'fixedCollection',\n\t\ttypeOptions: {\n\t\t\tsortable: true,\n\t\t\tmultipleValues: true,\n\t\t},\n\t\tplaceholder: 'Add Message',\n\t\tdefault: { values: [{ content: '', role: 'user' }] },\n\t\toptions: [\n\t\t\t{\n\t\t\t\tdisplayName: 'Values',\n\t\t\t\tname: 'values',\n\t\t\t\tvalues: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Content',\n\t\t\t\t\t\tname: 'content',\n\t\t\t\t\t\ttype: 'string',\n\t\t\t\t\t\tdescription: 'The content of the message to be sent',\n\t\t\t\t\t\tdefault: '',\n\t\t\t\t\t\tplaceholder: 'e.g. Hello, how can you help me?',\n\t\t\t\t\t\ttypeOptions: {\n\t\t\t\t\t\t\trows: 2,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Role',\n\t\t\t\t\t\tname: 'role',\n\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\tdescription: 'The role of this message in the conversation',\n\t\t\t\t\t\toptions: [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'User',\n\t\t\t\t\t\t\t\tvalue: 'user',\n\t\t\t\t\t\t\t\tdescription: 'Message from the user',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Assistant',\n\t\t\t\t\t\t\t\tvalue: 'assistant',\n\t\t\t\t\t\t\t\tdescription: 'Response from the assistant (for conversation history)',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t],\n\t\t\t\t\t\tdefault: 'user',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t],\n\t},\n\t{\n\t\tdisplayName: 'Simplify Output',\n\t\tname: 'simplify',\n\t\ttype: 'boolean',\n\t\tdefault: true,\n\t\tdescription: 'Whether to simplify the response or not',\n\t},\n\t{\n\t\tdisplayName: 'Options',\n\t\tname: 'options',\n\t\tplaceholder: 'Add Option',\n\t\ttype: 'collection',\n\t\tdefault: {},\n\t\toptions: [\n\t\t\t{\n\t\t\t\tdisplayName: 'System Message',\n\t\t\t\tname: 'system',\n\t\t\t\ttype: 'string',\n\t\t\t\tdefault: '',\n\t\t\t\tplaceholder: 'e.g. You are a helpful assistant.',\n\t\t\t\tdescription: 'System message to set the context for the conversation',\n\t\t\t\ttypeOptions: {\n\t\t\t\t\trows: 2,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Temperature',\n\t\t\t\tname: 'temperature',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: 0.8,\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tminValue: 0,\n\t\t\t\t\tmaxValue: 2,\n\t\t\t\t\tnumberPrecision: 2,\n\t\t\t\t},\n\t\t\t\tdescription: 'Controls randomness in responses. 
Lower values make output more focused.',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Output Randomness (Top P)',\n\t\t\t\tname: 'top_p',\n\t\t\t\tdefault: 0.7,\n\t\t\t\tdescription: 'The maximum cumulative probability of tokens to consider when sampling',\n\t\t\t\ttype: 'number',\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tminValue: 0,\n\t\t\t\t\tmaxValue: 1,\n\t\t\t\t\tnumberPrecision: 1,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Top K',\n\t\t\t\tname: 'top_k',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: 40,\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tminValue: 1,\n\t\t\t\t},\n\t\t\t\tdescription: 'Controls diversity by limiting the number of top tokens to consider',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Max Tokens',\n\t\t\t\tname: 'num_predict',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: 1024,\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tminValue: 1,\n\t\t\t\t\tnumberPrecision: 0,\n\t\t\t\t},\n\t\t\t\tdescription: 'Maximum number of tokens to generate in the completion',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Frequency Penalty',\n\t\t\t\tname: 'frequency_penalty',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: 0.0,\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tminValue: 0,\n\t\t\t\t\tnumberPrecision: 2,\n\t\t\t\t},\n\t\t\t\tdescription:\n\t\t\t\t\t'Adjusts the penalty for tokens that have already appeared in the generated text. Higher values discourage repetition.',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Presence Penalty',\n\t\t\t\tname: 'presence_penalty',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: 0.0,\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tnumberPrecision: 2,\n\t\t\t\t},\n\t\t\t\tdescription:\n\t\t\t\t\t'Adjusts the penalty for tokens based on their presence in the generated text so far. Positive values penalize tokens that have already appeared, encouraging diversity.',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Repetition Penalty',\n\t\t\t\tname: 'repeat_penalty',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: 1.1,\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tminValue: 0,\n\t\t\t\t\tnumberPrecision: 2,\n\t\t\t\t},\n\t\t\t\tdescription:\n\t\t\t\t\t'Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient.',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Context Length',\n\t\t\t\tname: 'num_ctx',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: 4096,\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tminValue: 1,\n\t\t\t\t\tnumberPrecision: 0,\n\t\t\t\t},\n\t\t\t\tdescription: 'Sets the size of the context window used to generate the next token',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Repeat Last N',\n\t\t\t\tname: 'repeat_last_n',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: 64,\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tminValue: -1,\n\t\t\t\t\tnumberPrecision: 0,\n\t\t\t\t},\n\t\t\t\tdescription:\n\t\t\t\t\t'Sets how far back for the model to look back to prevent repetition. (0 = disabled, -1 = num_ctx).',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Min P',\n\t\t\t\tname: 'min_p',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: 0.0,\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tminValue: 0,\n\t\t\t\t\tmaxValue: 1,\n\t\t\t\t\tnumberPrecision: 3,\n\t\t\t\t},\n\t\t\t\tdescription:\n\t\t\t\t\t'Alternative to the top_p, and aims to ensure a balance of quality and variety. 
The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token.',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Seed',\n\t\t\t\tname: 'seed',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: 0,\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tminValue: 0,\n\t\t\t\t\tnumberPrecision: 0,\n\t\t\t\t},\n\t\t\t\tdescription:\n\t\t\t\t\t'Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Stop Sequences',\n\t\t\t\tname: 'stop',\n\t\t\t\ttype: 'string',\n\t\t\t\tdefault: '',\n\t\t\t\tdescription:\n\t\t\t\t\t'Sets the stop sequences to use. When this pattern is encountered the LLM will stop generating text and return. Separate multiple patterns with commas',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Keep Alive',\n\t\t\t\tname: 'keep_alive',\n\t\t\t\ttype: 'string',\n\t\t\t\tdefault: '5m',\n\t\t\t\tdescription:\n\t\t\t\t\t'Specifies the duration to keep the loaded model in memory after use. Format: 1h30m (1 hour 30 minutes).',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Low VRAM Mode',\n\t\t\t\tname: 'low_vram',\n\t\t\t\ttype: 'boolean',\n\t\t\t\tdefault: false,\n\t\t\t\tdescription:\n\t\t\t\t\t'Whether to activate low VRAM mode, which reduces memory usage at the cost of slower generation speed. Useful for GPUs with limited memory.',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Main GPU ID',\n\t\t\t\tname: 'main_gpu',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: 0,\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tminValue: 0,\n\t\t\t\t\tnumberPrecision: 0,\n\t\t\t\t},\n\t\t\t\tdescription:\n\t\t\t\t\t'Specifies the ID of the GPU to use for the main computation. Only change this if you have multiple GPUs.',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Context Batch Size',\n\t\t\t\tname: 'num_batch',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: 512,\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tminValue: 1,\n\t\t\t\t\tnumberPrecision: 0,\n\t\t\t\t},\n\t\t\t\tdescription:\n\t\t\t\t\t'Sets the batch size for prompt processing. Larger batch sizes may improve generation speed but increase memory usage.',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Number of GPUs',\n\t\t\t\tname: 'num_gpu',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: -1,\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tminValue: -1,\n\t\t\t\t\tnumberPrecision: 0,\n\t\t\t\t},\n\t\t\t\tdescription:\n\t\t\t\t\t'Specifies the number of GPUs to use for parallel processing. Set to -1 for auto-detection.',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Number of CPU Threads',\n\t\t\t\tname: 'num_thread',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: 0,\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tminValue: 0,\n\t\t\t\t\tnumberPrecision: 0,\n\t\t\t\t},\n\t\t\t\tdescription:\n\t\t\t\t\t'Specifies the number of CPU threads to use for processing. Set to 0 for auto-detection.',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Penalize Newlines',\n\t\t\t\tname: 'penalize_newline',\n\t\t\t\ttype: 'boolean',\n\t\t\t\tdefault: true,\n\t\t\t\tdescription:\n\t\t\t\t\t'Whether the model will be less likely to generate newline characters, encouraging longer continuous sequences of text',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Use Memory Locking',\n\t\t\t\tname: 'use_mlock',\n\t\t\t\ttype: 'boolean',\n\t\t\t\tdefault: false,\n\t\t\t\tdescription:\n\t\t\t\t\t'Whether to lock the model in memory to prevent swapping. 
This can improve performance but requires sufficient available memory.',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Use Memory Mapping',\n\t\t\t\tname: 'use_mmap',\n\t\t\t\ttype: 'boolean',\n\t\t\t\tdefault: true,\n\t\t\t\tdescription:\n\t\t\t\t\t'Whether to use memory mapping for loading the model. This can reduce memory usage but may impact performance.',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Load Vocabulary Only',\n\t\t\t\tname: 'vocab_only',\n\t\t\t\ttype: 'boolean',\n\t\t\t\tdefault: false,\n\t\t\t\tdescription:\n\t\t\t\t\t'Whether to only load the model vocabulary without the weights. Useful for quickly testing tokenization.',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Output Format',\n\t\t\t\tname: 'format',\n\t\t\t\ttype: 'options',\n\t\t\t\toptions: [\n\t\t\t\t\t{ name: 'Default', value: '' },\n\t\t\t\t\t{ name: 'JSON', value: 'json' },\n\t\t\t\t],\n\t\t\t\tdefault: '',\n\t\t\t\tdescription: 'Specifies the format of the API response',\n\t\t\t},\n\t\t],\n\t},\n];\n\ninterface MessageOptions {\n\tsystem?: string;\n\ttemperature?: number;\n\ttop_p?: number;\n\ttop_k?: number;\n\tnum_predict?: number;\n\tfrequency_penalty?: number;\n\tpresence_penalty?: number;\n\trepeat_penalty?: number;\n\tnum_ctx?: number;\n\trepeat_last_n?: number;\n\tmin_p?: number;\n\tseed?: number;\n\tstop?: string | string[];\n\tlow_vram?: boolean;\n\tmain_gpu?: number;\n\tnum_batch?: number;\n\tnum_gpu?: number;\n\tnum_thread?: number;\n\tpenalize_newline?: boolean;\n\tuse_mlock?: boolean;\n\tuse_mmap?: boolean;\n\tvocab_only?: boolean;\n\tformat?: string;\n\tkeep_alive?: string;\n}\n\nconst displayOptions = {\n\tshow: {\n\t\toperation: ['message'],\n\t\tresource: ['text'],\n\t},\n};\n\nexport const description = updateDisplayOptions(displayOptions, properties);\n\nexport async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {\n\tconst model = this.getNodeParameter('modelId', i, '', { extractValue: true }) as string;\n\tconst messages = this.getNodeParameter('messages.values', i, []) as OllamaMessage[];\n\tconst simplify = this.getNodeParameter('simplify', i, true) as boolean;\n\tconst options = this.getNodeParameter('options', i, {}) as MessageOptions;\n\tconst { tools, connectedTools } = await getTools.call(this);\n\n\tif (options.system) {\n\t\tmessages.unshift({\n\t\t\trole: 'system',\n\t\t\tcontent: options.system,\n\t\t});\n\t}\n\n\tdelete options.system;\n\n\tconst processedOptions = { ...options };\n\tif (processedOptions.stop && typeof processedOptions.stop === 'string') {\n\t\tprocessedOptions.stop = processedOptions.stop\n\t\t\t.split(',')\n\t\t\t.map((s: string) => s.trim())\n\t\t\t.filter(Boolean);\n\t}\n\n\tconst body = {\n\t\tmodel,\n\t\tmessages,\n\t\tstream: false,\n\t\ttools,\n\t\toptions: processedOptions,\n\t};\n\n\tlet response: OllamaChatResponse = await apiRequest.call(this, 'POST', '/api/chat', {\n\t\tbody,\n\t});\n\n\tif (tools.length > 0 && response.message.tool_calls && response.message.tool_calls.length > 0) {\n\t\tconst toolCalls = response.message.tool_calls;\n\n\t\tmessages.push(response.message);\n\n\t\tfor (const toolCall of toolCalls) {\n\t\t\tlet toolResponse = '';\n\t\t\tlet toolFound = false;\n\n\t\t\tfor (const tool of connectedTools) {\n\t\t\t\tif (tool.name === toolCall.function.name) {\n\t\t\t\t\ttoolFound = true;\n\t\t\t\t\ttry {\n\t\t\t\t\t\tconst result: unknown = await tool.invoke(toolCall.function.arguments);\n\t\t\t\t\t\ttoolResponse =\n\t\t\t\t\t\t\ttypeof result === 'object' && result !== null\n\t\t\t\t\t\t\t\t? 
JSON.stringify(result)\n\t\t\t\t\t\t\t\t: String(result);\n\t\t\t\t\t} catch (error) {\n\t\t\t\t\t\ttoolResponse = `Error executing tool: ${error instanceof Error ? error.message : 'Unknown error'}`;\n\t\t\t\t\t}\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Add tool response even if tool wasn't found to prevent silent failure\n\t\t\tif (!toolFound) {\n\t\t\t\ttoolResponse = `Error: Tool '${toolCall.function.name}' not found`;\n\t\t\t}\n\n\t\t\tmessages.push({\n\t\t\t\trole: 'tool',\n\t\t\t\tcontent: toolResponse,\n\t\t\t\ttool_name: toolCall.function.name,\n\t\t\t});\n\t\t}\n\n\t\tconst updatedBody = {\n\t\t\t...body,\n\t\t\tmessages,\n\t\t};\n\n\t\tresponse = await apiRequest.call(this, 'POST', '/api/chat', {\n\t\t\tbody: updatedBody,\n\t\t});\n\t}\n\n\tif (simplify) {\n\t\treturn [\n\t\t\t{\n\t\t\t\tjson: { content: response.message.content },\n\t\t\t\tpairedItem: { item: i },\n\t\t\t},\n\t\t];\n\t}\n\n\treturn [\n\t\t{\n\t\t\tjson: { ...response },\n\t\t\tpairedItem: { item: i },\n\t\t},\n\t];\n}\n\nasync function getTools(this: IExecuteFunctions) {\n\tlet connectedTools: Tool[] = [];\n\tconst nodeInputs = this.getNodeInputs();\n\n\tif (nodeInputs.some((input) => input.type === 'ai_tool')) {\n\t\tconnectedTools = await getConnectedTools(this, true);\n\t}\n\n\tconst tools: OllamaTool[] = connectedTools.map((tool) => ({\n\t\ttype: 'function',\n\t\tfunction: {\n\t\t\tname: tool.name,\n\t\t\tdescription: tool.description,\n\t\t\tparameters: zodToJsonSchema(tool.schema),\n\t\t},\n\t}));\n\n\treturn { tools, connectedTools };\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAEA,0BAAqC;AACrC,gCAAgC;AAEhC,qBAAkC;AAGlC,uBAA2B;AAC3B,0BAAyB;AAEzB,MAAM,aAAgC;AAAA,EACrC;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,aAAa;AAAA,MACZ,UAAU;AAAA,MACV,gBAAgB;AAAA,IACjB;AAAA,IACA,aAAa;AAAA,IACb,SAAS,EAAE,QAAQ,CAAC,EAAE,SAAS,IAAI,MAAM,OAAO,CAAC,EAAE;AAAA,IACnD,SAAS;AAAA,MACR;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,QAAQ;AAAA,UACP;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,aAAa;AAAA,YACb,SAAS;AAAA,YACT,aAAa;AAAA,YACb,aAAa;AAAA,cACZ,MAAM;AAAA,YACP;AAAA,UACD;AAAA,UACA;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,aAAa;AAAA,YACb,SAAS;AAAA,cACR;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aAAa;AAAA,cACd;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aAAa;AAAA,cACd;AAAA,YACD;AAAA,YACA,SAAS;AAAA,UACV;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,aAAa;AAAA,EACd;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,IACN,SAAS,CAAC;AAAA,IACV,SAAS;AAAA,MACR;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,QACb,aAAa;AAAA,QACb,aAAa;AAAA,UACZ,MAAM;AAAA,QACP;AAAA,MACD;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,UACZ,UAAU;AAAA,UACV,UAAU;AAAA,UACV,iBAAiB;AAAA,QAClB;AAAA,QACA,aAAa;AAAA,MACd;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,QACb,MAAM;AAAA,QACN,aAAa;AAAA,UACZ,UAAU;AAAA,UACV,UAAU;AAAA,UACV,iBAAiB;AAAA,QAClB;AAAA,MACD;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,UACZ,UAAU;AAAA,QACX;AAAA,QACA,aAAa;AAAA,MACd;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,UACZ,UAAU;AAAA,UACV,iBAAiB;AAAA,QAClB;AAAA,QACA,aAAa;AAAA,MACd;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,UACZ,UAAU;AAAA,UACV,iBAAiB;AAAA,QAClB;AAAA,QACA,aACC;AAAA,MACF;AAAA,
MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,UACZ,iBAAiB;AAAA,QAClB;AAAA,QACA,aACC;AAAA,MACF;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,UACZ,UAAU;AAAA,UACV,iBAAiB;AAAA,QAClB;AAAA,QACA,aACC;AAAA,MACF;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,UACZ,UAAU;AAAA,UACV,iBAAiB;AAAA,QAClB;AAAA,QACA,aAAa;AAAA,MACd;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,UACZ,UAAU;AAAA,UACV,iBAAiB;AAAA,QAClB;AAAA,QACA,aACC;AAAA,MACF;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,UACZ,UAAU;AAAA,UACV,UAAU;AAAA,UACV,iBAAiB;AAAA,QAClB;AAAA,QACA,aACC;AAAA,MACF;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,UACZ,UAAU;AAAA,UACV,iBAAiB;AAAA,QAClB;AAAA,QACA,aACC;AAAA,MACF;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aACC;AAAA,MACF;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aACC;AAAA,MACF;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aACC;AAAA,MACF;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,UACZ,UAAU;AAAA,UACV,iBAAiB;AAAA,QAClB;AAAA,QACA,aACC;AAAA,MACF;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,UACZ,UAAU;AAAA,UACV,iBAAiB;AAAA,QAClB;AAAA,QACA,aACC;AAAA,MACF;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,UACZ,UAAU;AAAA,UACV,iBAAiB;AAAA,QAClB;AAAA,QACA,aACC;AAAA,MACF;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,UACZ,UAAU;AAAA,UACV,iBAAiB;AAAA,QAClB;AAAA,QACA,aACC;AAAA,MACF;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aACC;AAAA,MACF;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aACC;AAAA,MACF;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aACC;AAAA,MACF;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aACC;AAAA,MACF;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,UACR,EAAE,MAAM,WAAW,OAAO,GAAG;AAAA,UAC7B,EAAE,MAAM,QAAQ,OAAO,OAAO;AAAA,QAC/B;AAAA,QACA,SAAS;AAAA,QACT,aAAa;AAAA,MACd;AAAA,IACD;AAAA,EACD;AACD;AA6BA,MAAM,iBAAiB;AAAA,EACtB,MAAM;AAAA,IACL,WAAW,CAAC,SAAS;AAAA,IACrB,UAAU,CAAC,MAAM;AAAA,EAClB;AACD;AAEO,MAAM,kBAAc,0CAAqB,gBAAgB,UAAU;AAE1E,eAAsB,QAAiC,GAA0C;AAChG,QAAM,QAAQ,KAAK,iBAAiB,WAAW,GAAG,IAAI,EAAE,cAAc,KAAK,CAAC;AAC5E,QAAM,WAAW,KAAK,iBAAiB,mBAAmB,GAAG,CAAC,CAAC;AAC/D,QAAM,WAAW,KAAK,iBAAiB,YAAY,GAAG,IAAI;AAC1D,QAAM,UAAU,KAAK,iBAAiB,WAAW,GAAG,CAAC,CAAC;AACtD,QAAM,EAAE,OAAO,eAAe,IAAI,MAAM,SAAS,KAAK,IAAI;AAE1D,MAAI,QAAQ,QAAQ;AACnB,aAAS,QAAQ;AAAA,MAChB,MAAM;AAAA,MACN,SAAS,QAAQ;AAAA,IAClB,CAAC;AAAA,EACF;AAEA,SAAO,QAAQ;AAEf,QAAM,mBAAmB,EAAE,GAAG,QAAQ;AACtC,MAAI,iBAAiB,QAAQ,OAAO,iBAAiB,SAAS,UAAU;AACvE,qBAAiB,OAAO,iBAAiB,KACvC,MAAM,GAAG,EACT,IAAI,CAAC,MAAc,EAAE,KAAK,CAAC,EAC3B,OAAO,OAAO;AAAA,EACjB;AAEA,QAAM,OAAO;AAAA,IACZ;AAAA,IACA;AAAA,IACA,QAAQ;AAAA,IACR;AAAA,IACA,SAAS;AAAA,EACV;AAEA,MAAI,WAA+B,MAAM,4BAAW,KAAK,MAAM,QAAQ,aAAa;AAAA,IACnF;AAAA,EACD,CAAC;AAED,MAAI,MAAM,SAAS,KAAK,SAAS,QAAQ,cAAc,SAAS,QAAQ,WAAW,SAAS,GAAG;AAC9F,UAAM,YAAY,SAAS,QAAQ;AAEnC,aAAS,KAAK,SAAS,OAAO;AAE9B,eAAW,YAAY,WAAW;AACjC,UAAI,eAAe;AACnB,UAAI,YAAY;AAEhB,iBAAW,QAAQ,gBAAgB;AAClC,YAAI,KAAK,SAAS,SAAS,SAAS,MAAM;AACzC,sBAAY;AACZ,cAAI;AACH,kBAAM,SAAkB,MAAM,KAAK,OA
AO,SAAS,SAAS,SAAS;AACrE,2BACC,OAAO,WAAW,YAAY,WAAW,OACtC,KAAK,UAAU,MAAM,IACrB,OAAO,MAAM;AAAA,UAClB,SAAS,OAAO;AACf,2BAAe,yBAAyB,iBAAiB,QAAQ,MAAM,UAAU,eAAe;AAAA,UACjG;AACA;AAAA,QACD;AAAA,MACD;AAGA,UAAI,CAAC,WAAW;AACf,uBAAe,gBAAgB,SAAS,SAAS,IAAI;AAAA,MACtD;AAEA,eAAS,KAAK;AAAA,QACb,MAAM;AAAA,QACN,SAAS;AAAA,QACT,WAAW,SAAS,SAAS;AAAA,MAC9B,CAAC;AAAA,IACF;AAEA,UAAM,cAAc;AAAA,MACnB,GAAG;AAAA,MACH;AAAA,IACD;AAEA,eAAW,MAAM,4BAAW,KAAK,MAAM,QAAQ,aAAa;AAAA,MAC3D,MAAM;AAAA,IACP,CAAC;AAAA,EACF;AAEA,MAAI,UAAU;AACb,WAAO;AAAA,MACN;AAAA,QACC,MAAM,EAAE,SAAS,SAAS,QAAQ,QAAQ;AAAA,QAC1C,YAAY,EAAE,MAAM,EAAE;AAAA,MACvB;AAAA,IACD;AAAA,EACD;AAEA,SAAO;AAAA,IACN;AAAA,MACC,MAAM,EAAE,GAAG,SAAS;AAAA,MACpB,YAAY,EAAE,MAAM,EAAE;AAAA,IACvB;AAAA,EACD;AACD;AAEA,eAAe,WAAkC;AAChD,MAAI,iBAAyB,CAAC;AAC9B,QAAM,aAAa,KAAK,cAAc;AAEtC,MAAI,WAAW,KAAK,CAAC,UAAU,MAAM,SAAS,SAAS,GAAG;AACzD,qBAAiB,UAAM,kCAAkB,MAAM,IAAI;AAAA,EACpD;AAEA,QAAM,QAAsB,eAAe,IAAI,CAAC,UAAU;AAAA,IACzD,MAAM;AAAA,IACN,UAAU;AAAA,MACT,MAAM,KAAK;AAAA,MACX,aAAa,KAAK;AAAA,MAClB,gBAAY,2CAAgB,KAAK,MAAM;AAAA,IACxC;AAAA,EACD,EAAE;AAEF,SAAO,EAAE,OAAO,eAAe;AAChC;","names":[]}
package/dist/nodes/vendors/Ollama/actions/versionDescription.js
@@ -0,0 +1,107 @@
1
+ "use strict";
2
+ var __create = Object.create;
3
+ var __defProp = Object.defineProperty;
4
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
5
+ var __getOwnPropNames = Object.getOwnPropertyNames;
6
+ var __getProtoOf = Object.getPrototypeOf;
7
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
8
+ var __export = (target, all) => {
9
+ for (var name in all)
10
+ __defProp(target, name, { get: all[name], enumerable: true });
11
+ };
12
+ var __copyProps = (to, from, except, desc) => {
13
+ if (from && typeof from === "object" || typeof from === "function") {
14
+ for (let key of __getOwnPropNames(from))
15
+ if (!__hasOwnProp.call(to, key) && key !== except)
16
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
17
+ }
18
+ return to;
19
+ };
20
+ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
21
+ // If the importer is in node compatibility mode or this is not an ESM
22
+ // file that has been converted to a CommonJS file using a Babel-
23
+ // compatible transform (i.e. "__esModule" has not been set), then set
24
+ // "default" to the CommonJS "module.exports" for node compatibility.
25
+ isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
26
+ mod
27
+ ));
28
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
29
+ var versionDescription_exports = {};
30
+ __export(versionDescription_exports, {
31
+ versionDescription: () => versionDescription
32
+ });
33
+ module.exports = __toCommonJS(versionDescription_exports);
34
+ var import_n8n_workflow = require("n8n-workflow");
35
+ var image = __toESM(require("./image"));
36
+ var text = __toESM(require("./text"));
37
+ const versionDescription = {
38
+ displayName: "Ollama",
39
+ name: "ollama",
40
+ icon: "file:ollama.svg",
41
+ group: ["transform"],
42
+ version: 1,
43
+ subtitle: '={{ $parameter["operation"] + ": " + $parameter["resource"] }}',
44
+ description: "Interact with Ollama AI models",
45
+ defaults: {
46
+ name: "Ollama"
47
+ },
48
+ usableAsTool: true,
49
+ codex: {
50
+ alias: ["LangChain", "image", "vision", "AI", "local"],
51
+ categories: ["AI"],
52
+ subcategories: {
53
+ AI: ["Agents", "Miscellaneous", "Root Nodes"]
54
+ },
55
+ resources: {
56
+ primaryDocumentation: [
57
+ {
58
+ url: "https://docs.n8n.io/integrations/builtin/app-nodes/n8n-nodes-langchain.ollama/"
59
+ }
60
+ ]
61
+ }
62
+ },
63
+ inputs: `={{
64
+ (() => {
65
+ const resource = $parameter.resource;
66
+ const operation = $parameter.operation;
67
+ if (resource === 'text' && operation === 'message') {
68
+ return [{ type: 'main' }, { type: 'ai_tool', displayName: 'Tools' }];
69
+ }
70
+
71
+ return ['main'];
72
+ })()
73
+ }}`,
74
+ outputs: [import_n8n_workflow.NodeConnectionTypes.Main],
75
+ credentials: [
76
+ {
77
+ name: "ollamaApi",
78
+ required: true
79
+ }
80
+ ],
81
+ properties: [
82
+ {
83
+ displayName: "Resource",
84
+ name: "resource",
85
+ type: "options",
86
+ noDataExpression: true,
87
+ options: [
88
+ {
89
+ name: "Image",
90
+ value: "image"
91
+ },
92
+ {
93
+ name: "Text",
94
+ value: "text"
95
+ }
96
+ ],
97
+ default: "text"
98
+ },
99
+ ...image.description,
100
+ ...text.description
101
+ ]
102
+ };
103
+ // Annotate the CommonJS export names for ESM import in node:
104
+ 0 && (module.exports = {
105
+ versionDescription
106
+ });
107
+ //# sourceMappingURL=versionDescription.js.map
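The dynamic inputs expression in the version description above only exposes the ai_tool "Tools" connection when the Text resource's Message operation is selected; every other resource/operation pair gets a plain main input. An illustrative sketch of the branch the expression takes (this mirrors the expression's logic; it is not code from the package, which evaluates the expression inside n8n):

// Sketch: what the node's `inputs` expression evaluates to for a given
// resource/operation pair.
type NodeInput = string | { type: string; displayName?: string };

function resolveInputs(resource: string, operation: string): NodeInput[] {
  if (resource === 'text' && operation === 'message') {
    // Message operation accepts connected tools in addition to the main input
    return [{ type: 'main' }, { type: 'ai_tool', displayName: 'Tools' }];
  }
  return ['main'];
}

// resolveInputs('text', 'message')  -> [{ type: 'main' }, { type: 'ai_tool', displayName: 'Tools' }]
// resolveInputs('image', 'analyze') -> ['main']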
package/dist/nodes/vendors/Ollama/actions/versionDescription.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../../../../../nodes/vendors/Ollama/actions/versionDescription.ts"],"sourcesContent":["/* eslint-disable n8n-nodes-base/node-filename-against-convention */\nimport { NodeConnectionTypes, type INodeTypeDescription } from 'n8n-workflow';\n\nimport * as image from './image';\nimport * as text from './text';\n\nexport const versionDescription: INodeTypeDescription = {\n\tdisplayName: 'Ollama',\n\tname: 'ollama',\n\ticon: 'file:ollama.svg',\n\tgroup: ['transform'],\n\tversion: 1,\n\tsubtitle: '={{ $parameter[\"operation\"] + \": \" + $parameter[\"resource\"] }}',\n\tdescription: 'Interact with Ollama AI models',\n\tdefaults: {\n\t\tname: 'Ollama',\n\t},\n\tusableAsTool: true,\n\tcodex: {\n\t\talias: ['LangChain', 'image', 'vision', 'AI', 'local'],\n\t\tcategories: ['AI'],\n\t\tsubcategories: {\n\t\t\tAI: ['Agents', 'Miscellaneous', 'Root Nodes'],\n\t\t},\n\t\tresources: {\n\t\t\tprimaryDocumentation: [\n\t\t\t\t{\n\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/app-nodes/n8n-nodes-langchain.ollama/',\n\t\t\t\t},\n\t\t\t],\n\t\t},\n\t},\n\tinputs: `={{\n\t\t(() => {\n\t\t\tconst resource = $parameter.resource;\n\t \tconst operation = $parameter.operation;\n\t\t\tif (resource === 'text' && operation === 'message') {\n\t\t\t\treturn [{ type: 'main' }, { type: 'ai_tool', displayName: 'Tools' }];\n\t\t\t}\n\n\t\t\treturn ['main'];\n\t\t})()\n\t}}`,\n\toutputs: [NodeConnectionTypes.Main],\n\tcredentials: [\n\t\t{\n\t\t\tname: 'ollamaApi',\n\t\t\trequired: true,\n\t\t},\n\t],\n\tproperties: [\n\t\t{\n\t\t\tdisplayName: 'Resource',\n\t\t\tname: 'resource',\n\t\t\ttype: 'options',\n\t\t\tnoDataExpression: true,\n\t\t\toptions: [\n\t\t\t\t{\n\t\t\t\t\tname: 'Image',\n\t\t\t\t\tvalue: 'image',\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'Text',\n\t\t\t\t\tvalue: 'text',\n\t\t\t\t},\n\t\t\t],\n\t\t\tdefault: 'text',\n\t\t},\n\t\t...image.description,\n\t\t...text.description,\n\t],\n};\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AACA,0BAA+D;AAE/D,YAAuB;AACvB,WAAsB;AAEf,MAAM,qBAA2C;AAAA,EACvD,aAAa;AAAA,EACb,MAAM;AAAA,EACN,MAAM;AAAA,EACN,OAAO,CAAC,WAAW;AAAA,EACnB,SAAS;AAAA,EACT,UAAU;AAAA,EACV,aAAa;AAAA,EACb,UAAU;AAAA,IACT,MAAM;AAAA,EACP;AAAA,EACA,cAAc;AAAA,EACd,OAAO;AAAA,IACN,OAAO,CAAC,aAAa,SAAS,UAAU,MAAM,OAAO;AAAA,IACrD,YAAY,CAAC,IAAI;AAAA,IACjB,eAAe;AAAA,MACd,IAAI,CAAC,UAAU,iBAAiB,YAAY;AAAA,IAC7C;AAAA,IACA,WAAW;AAAA,MACV,sBAAsB;AAAA,QACrB;AAAA,UACC,KAAK;AAAA,QACN;AAAA,MACD;AAAA,IACD;AAAA,EACD;AAAA,EACA,QAAQ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWR,SAAS,CAAC,wCAAoB,IAAI;AAAA,EAClC,aAAa;AAAA,IACZ;AAAA,MACC,MAAM;AAAA,MACN,UAAU;AAAA,IACX;AAAA,EACD;AAAA,EACA,YAAY;AAAA,IACX;AAAA,MACC,aAAa;AAAA,MACb,MAAM;AAAA,MACN,MAAM;AAAA,MACN,kBAAkB;AAAA,MAClB,SAAS;AAAA,QACR;AAAA,UACC,MAAM;AAAA,UACN,OAAO;AAAA,QACR;AAAA,QACA;AAAA,UACC,MAAM;AAAA,UACN,OAAO;AAAA,QACR;AAAA,MACD;AAAA,MACA,SAAS;AAAA,IACV;AAAA,IACA,GAAG,MAAM;AAAA,IACT,GAAG,KAAK;AAAA,EACT;AACD;","names":[]}
package/dist/nodes/vendors/Ollama/helpers/index.js
@@ -0,0 +1,17 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __copyProps = (to, from, except, desc) => {
7
+ if (from && typeof from === "object" || typeof from === "function") {
8
+ for (let key of __getOwnPropNames(from))
9
+ if (!__hasOwnProp.call(to, key) && key !== except)
10
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
11
+ }
12
+ return to;
13
+ };
14
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
15
+ var helpers_exports = {};
16
+ module.exports = __toCommonJS(helpers_exports);
17
+ //# sourceMappingURL=index.js.map
package/dist/nodes/vendors/Ollama/helpers/index.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../../../../../nodes/vendors/Ollama/helpers/index.ts"],"sourcesContent":["export type * from './interfaces';\n"],"mappings":";;;;;;;;;;;;;;AAAA;AAAA;","names":[]}
package/dist/nodes/vendors/Ollama/helpers/interfaces.js
@@ -0,0 +1,17 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __copyProps = (to, from, except, desc) => {
7
+ if (from && typeof from === "object" || typeof from === "function") {
8
+ for (let key of __getOwnPropNames(from))
9
+ if (!__hasOwnProp.call(to, key) && key !== except)
10
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
11
+ }
12
+ return to;
13
+ };
14
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
15
+ var interfaces_exports = {};
16
+ module.exports = __toCommonJS(interfaces_exports);
17
+ //# sourceMappingURL=interfaces.js.map
package/dist/nodes/vendors/Ollama/helpers/interfaces.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../../../../../nodes/vendors/Ollama/helpers/interfaces.ts"],"sourcesContent":["export interface OllamaMessage {\n\trole: 'system' | 'user' | 'assistant' | 'tool';\n\tcontent: string;\n\timages?: string[];\n\ttool_calls?: ToolCall[];\n\ttool_name?: string;\n}\n\nexport interface ToolCall {\n\tfunction: {\n\t\tname: string;\n\t\targuments: Record<string, any>;\n\t};\n}\n\nexport interface OllamaTool {\n\ttype: 'function';\n\tfunction: {\n\t\tname: string;\n\t\tdescription: string;\n\t\tparameters: Record<string, unknown>;\n\t};\n}\n\nexport interface OllamaChatResponse {\n\tmodel: string;\n\tcreated_at: string;\n\tmessage: OllamaMessage;\n\tdone: boolean;\n\tdone_reason?: string;\n\ttotal_duration?: number;\n\tload_duration?: number;\n\tprompt_eval_count?: number;\n\tprompt_eval_duration?: number;\n\teval_count?: number;\n\teval_duration?: number;\n}\n\nexport interface OllamaModel {\n\tname: string;\n\tmodified_at: string;\n\tsize: number;\n\tdigest: string;\n\tdetails: {\n\t\tformat: string;\n\t\tfamily: string;\n\t\tfamilies: string[] | null;\n\t\tparameter_size: string;\n\t\tquantization_level: string;\n\t};\n}\n\nexport interface OllamaTagsResponse {\n\tmodels: OllamaModel[];\n}\n"],"mappings":";;;;;;;;;;;;;;AAAA;AAAA;","names":[]}