@n8n/n8n-nodes-langchain 1.117.0 → 1.118.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. package/dist/credentials/McpOAuth2Api.credentials.js +45 -0
  2. package/dist/credentials/McpOAuth2Api.credentials.js.map +1 -0
  3. package/dist/known/credentials.json +10 -0
  4. package/dist/known/nodes.json +4 -0
  5. package/dist/nodes/Guardrails/Guardrails.node.js +73 -0
  6. package/dist/nodes/Guardrails/Guardrails.node.js.map +1 -0
  7. package/dist/nodes/Guardrails/actions/checks/jailbreak.js +50 -0
  8. package/dist/nodes/Guardrails/actions/checks/jailbreak.js.map +1 -0
  9. package/dist/nodes/Guardrails/actions/checks/keywords.js +66 -0
  10. package/dist/nodes/Guardrails/actions/checks/keywords.js.map +1 -0
  11. package/dist/nodes/Guardrails/actions/checks/nsfw.js +53 -0
  12. package/dist/nodes/Guardrails/actions/checks/nsfw.js.map +1 -0
  13. package/dist/nodes/Guardrails/actions/checks/pii.js +232 -0
  14. package/dist/nodes/Guardrails/actions/checks/pii.js.map +1 -0
  15. package/dist/nodes/Guardrails/actions/checks/secretKeys.js +201 -0
  16. package/dist/nodes/Guardrails/actions/checks/secretKeys.js.map +1 -0
  17. package/dist/nodes/Guardrails/actions/checks/topicalAlignment.js +38 -0
  18. package/dist/nodes/Guardrails/actions/checks/topicalAlignment.js.map +1 -0
  19. package/dist/nodes/Guardrails/actions/checks/urls.js +245 -0
  20. package/dist/nodes/Guardrails/actions/checks/urls.js.map +1 -0
  21. package/dist/nodes/Guardrails/actions/process.js +220 -0
  22. package/dist/nodes/Guardrails/actions/process.js.map +1 -0
  23. package/dist/nodes/Guardrails/actions/types.js +35 -0
  24. package/dist/nodes/Guardrails/actions/types.js.map +1 -0
  25. package/dist/nodes/Guardrails/description.js +454 -0
  26. package/dist/nodes/Guardrails/description.js.map +1 -0
  27. package/dist/nodes/Guardrails/guardrails.svg +11 -0
  28. package/dist/nodes/Guardrails/helpers/base.js +67 -0
  29. package/dist/nodes/Guardrails/helpers/base.js.map +1 -0
  30. package/dist/nodes/Guardrails/helpers/common.js +45 -0
  31. package/dist/nodes/Guardrails/helpers/common.js.map +1 -0
  32. package/dist/nodes/Guardrails/helpers/configureNodeInputs.js +50 -0
  33. package/dist/nodes/Guardrails/helpers/configureNodeInputs.js.map +1 -0
  34. package/dist/nodes/Guardrails/helpers/mappers.js +100 -0
  35. package/dist/nodes/Guardrails/helpers/mappers.js.map +1 -0
  36. package/dist/nodes/Guardrails/helpers/model.js +144 -0
  37. package/dist/nodes/Guardrails/helpers/model.js.map +1 -0
  38. package/dist/nodes/Guardrails/helpers/preflight.js +61 -0
  39. package/dist/nodes/Guardrails/helpers/preflight.js.map +1 -0
  40. package/dist/nodes/agents/Agent/V1/AgentV1.node.js +6 -0
  41. package/dist/nodes/agents/Agent/V1/AgentV1.node.js.map +1 -1
  42. package/dist/nodes/agents/Agent/V2/AgentV2.node.js +8 -0
  43. package/dist/nodes/agents/Agent/V2/AgentV2.node.js.map +1 -1
  44. package/dist/nodes/agents/Agent/V3/AgentV3.node.js +8 -0
  45. package/dist/nodes/agents/Agent/V3/AgentV3.node.js.map +1 -1
  46. package/dist/nodes/agents/Agent/agents/SqlAgent/description.js +10 -0
  47. package/dist/nodes/agents/Agent/agents/SqlAgent/description.js.map +1 -1
  48. package/dist/nodes/agents/Agent/agents/ToolsAgent/V2/execute.js +22 -0
  49. package/dist/nodes/agents/Agent/agents/ToolsAgent/V2/execute.js.map +1 -1
  50. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/execute.js +8 -3
  51. package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/execute.js.map +1 -1
  52. package/dist/nodes/chains/ChainLLM/methods/config.js +4 -0
  53. package/dist/nodes/chains/ChainLLM/methods/config.js.map +1 -1
  54. package/dist/nodes/chains/ChainRetrievalQA/ChainRetrievalQa.node.js +6 -0
  55. package/dist/nodes/chains/ChainRetrievalQA/ChainRetrievalQa.node.js.map +1 -1
  56. package/dist/nodes/llms/LMChatOpenAi/LmChatOpenAi.node.js +448 -24
  57. package/dist/nodes/llms/LMChatOpenAi/LmChatOpenAi.node.js.map +1 -1
  58. package/dist/nodes/llms/LMChatOpenAi/common.js +155 -0
  59. package/dist/nodes/llms/LMChatOpenAi/common.js.map +1 -0
  60. package/dist/nodes/llms/LMChatOpenAi/types.js +17 -0
  61. package/dist/nodes/llms/LMChatOpenAi/types.js.map +1 -0
  62. package/dist/nodes/mcp/McpClientTool/McpClientTool.node.js +48 -3
  63. package/dist/nodes/mcp/McpClientTool/McpClientTool.node.js.map +1 -1
  64. package/dist/nodes/mcp/McpClientTool/loadOptions.js +2 -1
  65. package/dist/nodes/mcp/McpClientTool/loadOptions.js.map +1 -1
  66. package/dist/nodes/mcp/McpClientTool/types.js.map +1 -1
  67. package/dist/nodes/mcp/McpClientTool/utils.js +66 -4
  68. package/dist/nodes/mcp/McpClientTool/utils.js.map +1 -1
  69. package/dist/nodes/trigger/ChatTrigger/ChatTrigger.node.js +45 -2
  70. package/dist/nodes/trigger/ChatTrigger/ChatTrigger.node.js.map +1 -1
  71. package/dist/nodes/vendors/OpenAi/helpers/utils.js +5 -0
  72. package/dist/nodes/vendors/OpenAi/helpers/utils.js.map +1 -1
  73. package/dist/nodes/vendors/OpenAi/v1/actions/assistant/message.operation.js +6 -12
  74. package/dist/nodes/vendors/OpenAi/v1/actions/assistant/message.operation.js.map +1 -1
  75. package/dist/nodes/vendors/OpenAi/v2/actions/text/response.operation.js +31 -6
  76. package/dist/nodes/vendors/OpenAi/v2/actions/text/response.operation.js.map +1 -1
  77. package/dist/types/credentials.json +1 -0
  78. package/dist/types/nodes.json +12 -11
  79. package/dist/utils/descriptions.js +18 -0
  80. package/dist/utils/descriptions.js.map +1 -1
  81. package/dist/utils/helpers.js +4 -1
  82. package/dist/utils/helpers.js.map +1 -1
  83. package/package.json +10 -8
@@ -1,7 +1,9 @@
1
1
  "use strict";
2
+ var __create = Object.create;
2
3
  var __defProp = Object.defineProperty;
3
4
  var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
5
  var __getOwnPropNames = Object.getOwnPropertyNames;
6
+ var __getProtoOf = Object.getPrototypeOf;
5
7
  var __hasOwnProp = Object.prototype.hasOwnProperty;
6
8
  var __export = (target, all) => {
7
9
  for (var name in all)
@@ -15,6 +17,14 @@ var __copyProps = (to, from, except, desc) => {
15
17
  }
16
18
  return to;
17
19
  };
20
+ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
21
+ // If the importer is in node compatibility mode or this is not an ESM
22
+ // file that has been converted to a CommonJS file using a Babel-
23
+ // compatible transform (i.e. "__esModule" has not been set), then set
24
+ // "default" to the CommonJS "module.exports" for node compatibility.
25
+ isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
26
+ mod
27
+ ));
18
28
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
29
  var LmChatOpenAi_node_exports = {};
20
30
  __export(LmChatOpenAi_node_exports, {
@@ -22,13 +32,49 @@ __export(LmChatOpenAi_node_exports, {
22
32
  });
23
33
  module.exports = __toCommonJS(LmChatOpenAi_node_exports);
24
34
  var import_openai = require("@langchain/openai");
35
+ var import_pick = __toESM(require("lodash/pick"));
25
36
  var import_n8n_workflow = require("n8n-workflow");
26
37
  var import_httpProxyAgent = require("../../../utils/httpProxyAgent");
27
38
  var import_sharedFields = require("../../../utils/sharedFields");
28
- var import_loadModels = require("./methods/loadModels");
29
39
  var import_error_handling = require("../../vendors/OpenAi/helpers/error-handling");
30
40
  var import_n8nLlmFailedAttemptHandler = require("../n8nLlmFailedAttemptHandler");
31
41
  var import_N8nLlmTracing = require("../N8nLlmTracing");
42
+ var import_common = require("./common");
43
+ var import_loadModels = require("./methods/loadModels");
44
+ const INCLUDE_JSON_WARNING = {
45
+ displayName: 'If using JSON response format, you must include word "json" in the prompt in your chain or agent. Also, make sure to select latest models released post November 2023.',
46
+ name: "notice",
47
+ type: "notice",
48
+ default: ""
49
+ };
50
+ const completionsResponseFormat = {
51
+ displayName: "Response Format",
52
+ name: "responseFormat",
53
+ default: "text",
54
+ type: "options",
55
+ options: [
56
+ {
57
+ name: "Text",
58
+ value: "text",
59
+ description: "Regular text response"
60
+ },
61
+ {
62
+ name: "JSON",
63
+ value: "json_object",
64
+ description: "Enables JSON mode, which should guarantee the message the model generates is valid JSON"
65
+ }
66
+ ]
67
+ };
68
+ const jsonSchemaExample = `{
69
+ "type": "object",
70
+ "properties": {
71
+ "message": {
72
+ "type": "string"
73
+ }
74
+ },
75
+ "additionalProperties": false,
76
+ "required": ["message"]
77
+ }`;
32
78
  class LmChatOpenAi {
33
79
  constructor() {
34
80
  this.methods = {
@@ -41,7 +87,7 @@ class LmChatOpenAi {
41
87
  name: "lmChatOpenAi",
42
88
  icon: { light: "file:openAiLight.svg", dark: "file:openAiLight.dark.svg" },
43
89
  group: ["transform"],
44
- version: [1, 1.1, 1.2],
90
+ version: [1, 1.1, 1.2, 1.3],
45
91
  description: "For advanced usage with an AI chain",
46
92
  defaults: {
47
93
  name: "OpenAI Chat Model"
@@ -76,16 +122,21 @@ class LmChatOpenAi {
76
122
  properties: [
77
123
  (0, import_sharedFields.getConnectionHintNoticeField)([import_n8n_workflow.NodeConnectionTypes.AiChain, import_n8n_workflow.NodeConnectionTypes.AiAgent]),
78
124
  {
79
- displayName: 'If using JSON response format, you must include word "json" in the prompt in your chain or agent. Also, make sure to select latest models released post November 2023.',
80
- name: "notice",
81
- type: "notice",
82
- default: "",
125
+ ...INCLUDE_JSON_WARNING,
83
126
  displayOptions: {
84
127
  show: {
85
128
  "/options.responseFormat": ["json_object"]
86
129
  }
87
130
  }
88
131
  },
132
+ {
133
+ ...INCLUDE_JSON_WARNING,
134
+ displayOptions: {
135
+ show: {
136
+ "/options.textFormat.textOptions.type": ["json_object"]
137
+ }
138
+ }
139
+ },
89
140
  {
90
141
  displayName: "Model",
91
142
  name: "model",
@@ -193,6 +244,118 @@ class LmChatOpenAi {
193
244
  }
194
245
  }
195
246
  },
247
+ {
248
+ displayName: "Use Responses API",
249
+ name: "responsesApiEnabled",
250
+ type: "boolean",
251
+ default: true,
252
+ description: 'Whether to use the Responses API to generate the response. <a href="https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatopenai/#use-responses-api">Learn more</a>.',
253
+ displayOptions: {
254
+ show: {
255
+ "@version": [{ _cnd: { gte: 1.3 } }]
256
+ }
257
+ }
258
+ },
259
+ {
260
+ displayName: "Built-in Tools",
261
+ name: "builtInTools",
262
+ placeholder: "Add Built-in Tool",
263
+ type: "collection",
264
+ default: {},
265
+ options: [
266
+ {
267
+ displayName: "Web Search",
268
+ name: "webSearch",
269
+ type: "collection",
270
+ default: { searchContextSize: "medium" },
271
+ options: [
272
+ {
273
+ displayName: "Search Context Size",
274
+ name: "searchContextSize",
275
+ type: "options",
276
+ default: "medium",
277
+ description: "High level guidance for the amount of context window space to use for the search",
278
+ options: [
279
+ { name: "Low", value: "low" },
280
+ { name: "Medium", value: "medium" },
281
+ { name: "High", value: "high" }
282
+ ]
283
+ },
284
+ {
285
+ displayName: "Web Search Allowed Domains",
286
+ name: "allowedDomains",
287
+ type: "string",
288
+ default: "",
289
+ description: "Comma-separated list of domains to search. Only domains in this list will be searched.",
290
+ placeholder: "e.g. google.com, wikipedia.org"
291
+ },
292
+ {
293
+ displayName: "Country",
294
+ name: "country",
295
+ type: "string",
296
+ default: "",
297
+ placeholder: "e.g. US, GB"
298
+ },
299
+ {
300
+ displayName: "City",
301
+ name: "city",
302
+ type: "string",
303
+ default: "",
304
+ placeholder: "e.g. New York, London"
305
+ },
306
+ {
307
+ displayName: "Region",
308
+ name: "region",
309
+ type: "string",
310
+ default: "",
311
+ placeholder: "e.g. New York, London"
312
+ }
313
+ ]
314
+ },
315
+ {
316
+ displayName: "File Search",
317
+ name: "fileSearch",
318
+ type: "collection",
319
+ default: { vectorStoreIds: "[]" },
320
+ options: [
321
+ {
322
+ displayName: "Vector Store IDs",
323
+ name: "vectorStoreIds",
324
+ description: 'The vector store IDs to use for the file search. Vector stores are managed via OpenAI Dashboard. <a href="https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatopenai/#built-in-tools">Learn more</a>.',
325
+ type: "json",
326
+ default: "[]",
327
+ required: true
328
+ },
329
+ {
330
+ displayName: "Filters",
331
+ name: "filters",
332
+ type: "json",
333
+ default: "{}"
334
+ },
335
+ {
336
+ displayName: "Max Results",
337
+ name: "maxResults",
338
+ type: "number",
339
+ default: 1,
340
+ typeOptions: { minValue: 1, maxValue: 50 }
341
+ }
342
+ ]
343
+ },
344
+ {
345
+ displayName: "Code Interpreter",
346
+ name: "codeInterpreter",
347
+ type: "boolean",
348
+ default: true,
349
+ description: "Whether to allow the model to execute code in a sandboxed environment"
350
+ }
351
+ ],
352
+ displayOptions: {
353
+ show: {
354
+ "@version": [{ _cnd: { gte: 1.3 } }],
355
+ "/responsesApiEnabled": [true]
356
+ }
357
+ }
358
+ },
196
359
  {
197
360
  displayName: "Options",
198
361
  name: "options",
@@ -231,23 +394,124 @@ class LmChatOpenAi {
231
394
  maxValue: 32768
232
395
  }
233
396
  },
397
+ {
398
+ ...completionsResponseFormat,
399
+ displayOptions: {
400
+ show: {
401
+ "@version": [{ _cnd: { lt: 1.3 } }]
402
+ }
403
+ }
404
+ },
405
+ {
406
+ ...completionsResponseFormat,
407
+ displayOptions: {
408
+ show: {
409
+ "@version": [{ _cnd: { gte: 1.3 } }],
410
+ "/responsesApiEnabled": [false]
411
+ }
412
+ }
413
+ },
234
414
  {
235
415
  displayName: "Response Format",
236
- name: "responseFormat",
237
- default: "text",
238
- type: "options",
416
+ name: "textFormat",
417
+ type: "fixedCollection",
418
+ default: { textOptions: [{ type: "text" }] },
239
419
  options: [
240
420
  {
241
- name: "Text",
242
- value: "text",
243
- description: "Regular text response"
244
- },
245
- {
246
- name: "JSON",
247
- value: "json_object",
248
- description: "Enables JSON mode, which should guarantee the message the model generates is valid JSON"
421
+ displayName: "Text",
422
+ name: "textOptions",
423
+ values: [
424
+ {
425
+ displayName: "Type",
426
+ name: "type",
427
+ type: "options",
428
+ default: "",
429
+ options: [
430
+ { name: "Text", value: "text" },
431
+ // eslint-disable-next-line n8n-nodes-base/node-param-display-name-miscased
432
+ { name: "JSON Schema (recommended)", value: "json_schema" },
433
+ { name: "JSON Object", value: "json_object" }
434
+ ]
435
+ },
436
+ {
437
+ displayName: "Verbosity",
438
+ name: "verbosity",
439
+ type: "options",
440
+ default: "medium",
441
+ options: [
442
+ { name: "Low", value: "low" },
443
+ { name: "Medium", value: "medium" },
444
+ { name: "High", value: "high" }
445
+ ]
446
+ },
447
+ {
448
+ displayName: "Name",
449
+ name: "name",
450
+ type: "string",
451
+ default: "my_schema",
452
+ description: "The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.",
453
+ displayOptions: {
454
+ show: {
455
+ type: ["json_schema"]
456
+ }
457
+ }
458
+ },
459
+ {
460
+ displayName: 'All properties in the schema must be set to "required", when using "strict" mode.',
461
+ name: "requiredNotice",
462
+ type: "notice",
463
+ default: "",
464
+ displayOptions: {
465
+ show: {
466
+ strict: [true]
467
+ }
468
+ }
469
+ },
470
+ {
471
+ displayName: "Schema",
472
+ name: "schema",
473
+ type: "json",
474
+ default: jsonSchemaExample,
475
+ description: "The schema of the response format",
476
+ displayOptions: {
477
+ show: {
478
+ type: ["json_schema"]
479
+ }
480
+ }
481
+ },
482
+ {
483
+ displayName: "Description",
484
+ name: "description",
485
+ type: "string",
486
+ default: "",
487
+ description: "The description of the response format",
488
+ displayOptions: {
489
+ show: {
490
+ type: ["json_schema"]
491
+ }
492
+ }
493
+ },
494
+ {
495
+ displayName: "Strict",
496
+ name: "strict",
497
+ type: "boolean",
498
+ default: false,
499
+ description: "Whether to require that the AI will always generate responses that match the provided JSON Schema",
500
+ displayOptions: {
501
+ show: {
502
+ type: ["json_schema"]
503
+ }
504
+ }
505
+ }
506
+ ]
249
507
  }
250
- ]
508
+ ],
509
+ displayOptions: {
510
+ show: {
511
+ "@version": [{ _cnd: { gte: 1.3 } }],
512
+ "/responsesApiEnabled": [true]
513
+ }
514
+ }
251
515
  },
252
516
  {
253
517
  displayName: "Presence Penalty",
@@ -316,6 +580,136 @@ class LmChatOpenAi {
316
580
  typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
317
581
  description: "Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.",
318
582
  type: "number"
583
+ },
584
+ {
585
+ displayName: "Conversation ID",
586
+ name: "conversationId",
587
+ default: "",
588
+ description: "The conversation that this response belongs to. Input items and output items from this response are automatically added to this conversation after this response completes.",
589
+ type: "string",
590
+ displayOptions: {
591
+ show: {
592
+ "@version": [{ _cnd: { gte: 1.3 } }],
593
+ "/responsesApiEnabled": [true]
594
+ }
595
+ }
596
+ },
597
+ {
598
+ displayName: "Prompt Cache Key",
599
+ name: "promptCacheKey",
600
+ type: "string",
601
+ default: "",
602
+ description: "Used by OpenAI to cache responses for similar requests to optimize your cache hit rates",
603
+ displayOptions: {
604
+ show: {
605
+ "@version": [{ _cnd: { gte: 1.3 } }],
606
+ "/responsesApiEnabled": [true]
607
+ }
608
+ }
609
+ },
610
+ {
611
+ displayName: "Safety Identifier",
612
+ name: "safetyIdentifier",
613
+ type: "string",
614
+ default: "",
615
+ description: "A stable identifier used to help detect users of your application that may be violating OpenAI's usage policies. The IDs should be a string that uniquely identifies each user.",
616
+ displayOptions: {
617
+ show: {
618
+ "@version": [{ _cnd: { gte: 1.3 } }],
619
+ "/responsesApiEnabled": [true]
620
+ }
621
+ }
622
+ },
623
+ {
624
+ displayName: "Service Tier",
625
+ name: "serviceTier",
626
+ type: "options",
627
+ default: "auto",
628
+ description: "The service tier to use for the request",
629
+ options: [
630
+ { name: "Auto", value: "auto" },
631
+ { name: "Flex", value: "flex" },
632
+ { name: "Default", value: "default" },
633
+ { name: "Priority", value: "priority" }
634
+ ],
635
+ displayOptions: {
636
+ show: {
637
+ "@version": [{ _cnd: { gte: 1.3 } }],
638
+ "/responsesApiEnabled": [true]
639
+ }
640
+ }
641
+ },
642
+ {
643
+ displayName: "Metadata",
644
+ name: "metadata",
645
+ type: "json",
646
+ description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.",
647
+ default: "{}",
648
+ displayOptions: {
649
+ show: {
650
+ "@version": [{ _cnd: { gte: 1.3 } }],
651
+ "/responsesApiEnabled": [true]
652
+ }
653
+ }
654
+ },
655
+ {
656
+ displayName: "Top Logprobs",
657
+ name: "topLogprobs",
658
+ type: "number",
659
+ default: 0,
660
+ description: "An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability",
661
+ typeOptions: {
662
+ minValue: 0,
663
+ maxValue: 20
664
+ },
665
+ displayOptions: {
666
+ show: {
667
+ "@version": [{ _cnd: { gte: 1.3 } }],
668
+ "/responsesApiEnabled": [true]
669
+ }
670
+ }
671
+ },
672
+ {
673
+ displayName: "Prompt",
674
+ name: "promptConfig",
675
+ type: "fixedCollection",
676
+ description: 'Configure the reusable prompt template configured via OpenAI Dashboard. <a href="https://platform.openai.com/docs/guides/prompt-engineering#reusable-prompts">Learn more</a>.',
677
+ default: { promptOptions: [{ promptId: "" }] },
678
+ options: [
679
+ {
680
+ displayName: "Prompt",
681
+ name: "promptOptions",
682
+ values: [
683
+ {
684
+ displayName: "Prompt ID",
685
+ name: "promptId",
686
+ type: "string",
687
+ default: "",
688
+ description: "The unique identifier of the prompt template to use"
689
+ },
690
+ {
691
+ displayName: "Version",
692
+ name: "version",
693
+ type: "string",
694
+ default: "",
695
+ description: "Optional version of the prompt template"
696
+ },
697
+ {
698
+ displayName: "Variables",
699
+ name: "variables",
700
+ type: "json",
701
+ default: "{}",
702
+ description: "Variables to be substituted into the prompt template"
703
+ }
704
+ ]
705
+ }
706
+ ],
707
+ displayOptions: {
708
+ show: {
709
+ "@version": [{ _cnd: { gte: 1.3 } }],
710
+ "/responsesApiEnabled": [true]
711
+ }
712
+ }
319
713
  }
320
714
  ]
321
715
  }
@@ -326,6 +720,7 @@ class LmChatOpenAi {
326
720
  const credentials = await this.getCredentials("openAiApi");
327
721
  const version = this.getNode().typeVersion;
328
722
  const modelName = version >= 1.2 ? this.getNodeParameter("model.value", itemIndex) : this.getNodeParameter("model", itemIndex);
723
+ const responsesApiEnabled = this.getNodeParameter("responsesApiEnabled", itemIndex, false);
329
724
  const options = this.getNodeParameter("options", itemIndex, {});
330
725
  const configuration = {};
331
726
  if (options.baseURL) {
@@ -344,20 +739,49 @@ class LmChatOpenAi {
344
739
  };
345
740
  }
346
741
  const modelKwargs = {};
347
- if (options.responseFormat) modelKwargs.response_format = { type: options.responseFormat };
348
- if (options.reasoningEffort && ["low", "medium", "high"].includes(options.reasoningEffort))
349
- modelKwargs.reasoning_effort = options.reasoningEffort;
350
- const model = new import_openai.ChatOpenAI({
742
+ if (responsesApiEnabled) {
743
+ const kwargs = (0, import_common.prepareAdditionalResponsesParams)(options);
744
+ Object.assign(modelKwargs, kwargs);
745
+ } else {
746
+ if (options.responseFormat) modelKwargs.response_format = { type: options.responseFormat };
747
+ if (options.reasoningEffort && ["low", "medium", "high"].includes(options.reasoningEffort)) {
748
+ modelKwargs.reasoning_effort = options.reasoningEffort;
749
+ }
750
+ }
751
+ const includedOptions = (0, import_pick.default)(options, [
752
+ "frequencyPenalty",
753
+ "maxTokens",
754
+ "presencePenalty",
755
+ "temperature",
756
+ "topP",
757
+ "baseURL"
758
+ ]);
759
+ const fields = {
351
760
  apiKey: credentials.apiKey,
352
761
  model: modelName,
353
- ...options,
762
+ ...includedOptions,
354
763
  timeout: options.timeout ?? 6e4,
355
764
  maxRetries: options.maxRetries ?? 2,
356
765
  configuration,
357
766
  callbacks: [new import_N8nLlmTracing.N8nLlmTracing(this)],
358
767
  modelKwargs,
359
768
  onFailedAttempt: (0, import_n8nLlmFailedAttemptHandler.makeN8nLlmFailedAttemptHandler)(this, import_error_handling.openAiFailedAttemptHandler)
360
- });
769
+ };
770
+ if (responsesApiEnabled) {
771
+ fields.useResponsesApi = true;
772
+ }
773
+ const model = new import_openai.ChatOpenAI(fields);
774
+ if (responsesApiEnabled) {
775
+ const tools = (0, import_common.formatBuiltInTools)(
776
+ this.getNodeParameter("builtInTools", itemIndex, {})
777
+ );
778
+ if (tools.length) {
779
+ model.metadata = {
780
+ ...model.metadata,
781
+ tools
782
+ };
783
+ }
784
+ }
361
785
  return {
362
786
  response: model
363
787
  };