@atom8n/n8n-nodes-langchain 2.5.5 → 2.5.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,67 @@
1
"use strict";
// esbuild-generated CommonJS interop prelude (standard boilerplate for bundled output).
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Defines every entry of `all` on `target` as an enumerable getter (live binding).
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copies own properties of `from` onto `to` as getters, skipping `except` and keys
// already present on `to`; enumerability is preserved from the source descriptor.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Wraps the export object with an `__esModule` marker so ESM consumers interop cleanly.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
var NineRouterApi_credentials_exports = {};
__export(NineRouterApi_credentials_exports, {
  NineRouterApi: () => NineRouterApi
});
module.exports = __toCommonJS(NineRouterApi_credentials_exports);
24
/**
 * n8n credential type for a 9Router instance.
 *
 * Stores an optional bearer API key (only needed when the instance enables
 * REQUIRE_API_KEY) together with the instance base URL. The credential test
 * issues a GET /models against that base URL.
 */
class NineRouterApi {
  // Machine name n8n uses to look up this credential type.
  name = "nineRouterApi";

  // Label shown in the n8n credentials UI.
  displayName = "9Router";

  documentationUrl = "nineRouter";

  // Fields the user fills in when creating the credential.
  properties = [
    {
      displayName: "API Key",
      name: "apiKey",
      type: "string",
      typeOptions: { password: true },
      required: false,
      default: "",
      description: "Optional API key if REQUIRE_API_KEY is enabled on your 9Router instance"
    },
    {
      displayName: "Base URL",
      name: "url",
      type: "string",
      default: "http://localhost:20128/api/v1",
      description: "Base URL of your 9Router instance"
    }
  ];

  // Generic bearer-token auth; the `=` expression resolves the stored key at runtime.
  authenticate = {
    type: "generic",
    properties: {
      headers: {
        Authorization: "=Bearer {{$credentials.apiKey}}"
      }
    }
  };

  // Connectivity check: list the configured instance's models.
  test = {
    request: {
      baseURL: "={{ $credentials.url }}",
      url: "/models"
    }
  };
}
63
// Annotate the CommonJS export names for ESM import in node:
// (dead code by design — the `0 &&` guard means it never runs, but it lets
// Node's cjs-module-lexer statically detect the named export `NineRouterApi`)
0 && (module.exports = {
  NineRouterApi
});
//# sourceMappingURL=NineRouterApi.credentials.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../../credentials/NineRouterApi.credentials.ts"],"sourcesContent":["import type {\n\tIAuthenticateGeneric,\n\tICredentialTestRequest,\n\tICredentialType,\n\tINodeProperties,\n} from 'n8n-workflow';\n\nexport class NineRouterApi implements ICredentialType {\n\tname = 'nineRouterApi';\n\n\tdisplayName = '9Router';\n\n\tdocumentationUrl = 'nineRouter';\n\n\tproperties: INodeProperties[] = [\n\t\t{\n\t\t\tdisplayName: 'API Key',\n\t\t\tname: 'apiKey',\n\t\t\ttype: 'string',\n\t\t\ttypeOptions: { password: true },\n\t\t\trequired: false,\n\t\t\tdefault: '',\n\t\t\tdescription: 'Optional API key if REQUIRE_API_KEY is enabled on your 9Router instance',\n\t\t},\n\t\t{\n\t\t\tdisplayName: 'Base URL',\n\t\t\tname: 'url',\n\t\t\ttype: 'string',\n\t\t\tdefault: 'http://localhost:20128/api/v1',\n\t\t\tdescription: 'Base URL of your 9Router instance',\n\t\t},\n\t];\n\n\tauthenticate: IAuthenticateGeneric = {\n\t\ttype: 'generic',\n\t\tproperties: {\n\t\t\theaders: {\n\t\t\t\tAuthorization: '=Bearer {{$credentials.apiKey}}',\n\t\t\t},\n\t\t},\n\t};\n\n\ttest: ICredentialTestRequest = {\n\t\trequest: {\n\t\t\tbaseURL: '={{ $credentials.url }}',\n\t\t\turl: '/models',\n\t\t},\n\t};\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAOO,MAAM,cAAyC;AAAA,EAA/C;AACN,gBAAO;AAEP,uBAAc;AAEd,4BAAmB;AAEnB,sBAAgC;AAAA,MAC/B;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,aAAa,EAAE,UAAU,KAAK;AAAA,QAC9B,UAAU;AAAA,QACV,SAAS;AAAA,QACT,aAAa;AAAA,MACd;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,MACd;AAAA,IACD;AAEA,wBAAqC;AAAA,MACpC,MAAM;AAAA,MACN,YAAY;AAAA,QACX,SAAS;AAAA,UACR,eAAe;AAAA,QAChB;AAAA,MACD;AAAA,IACD;AAEA,gBAA+B;AAAA,MAC9B,SAAS;AAAA,QACR,SAAS;AAAA,QACT,KAAK;AAAA,MACN;AAAA,IACD;AAAA;AACD;","names":[]}
@@ -125,6 +125,13 @@
125
125
  "lmOllama"
126
126
  ]
127
127
  },
128
+ "nineRouterApi": {
129
+ "className": "NineRouterApi",
130
+ "sourcePath": "dist/credentials/NineRouterApi.credentials.js",
131
+ "supportedNodes": [
132
+ "lmChat9Router"
133
+ ]
134
+ },
128
135
  "openRouterApi": {
129
136
  "className": "OpenRouterApi",
130
137
  "sourcePath": "dist/credentials/OpenRouterApi.credentials.js",
@@ -131,6 +131,14 @@
131
131
  "className": "LmChatCursorAgent",
132
132
  "sourcePath": "dist/nodes/llms/LmChatCursorAgent/LmChatCursorAgent.node.js"
133
133
  },
134
+ "lmChatCodexCli": {
135
+ "className": "LmChatCodexCli",
136
+ "sourcePath": "dist/nodes/llms/LmChatCodexCli/LmChatCodexCli.node.js"
137
+ },
138
+ "lmChatOpenCodeCli": {
139
+ "className": "LmChatOpenCodeCli",
140
+ "sourcePath": "dist/nodes/llms/LmChatOpenCodeCli/LmChatOpenCodeCli.node.js"
141
+ },
134
142
  "lmChatDeepSeek": {
135
143
  "className": "LmChatDeepSeek",
136
144
  "sourcePath": "dist/nodes/llms/LmChatDeepSeek/LmChatDeepSeek.node.js"
@@ -159,6 +167,10 @@
159
167
  "className": "LmChatOllama",
160
168
  "sourcePath": "dist/nodes/llms/LMChatOllama/LmChatOllama.node.js"
161
169
  },
170
+ "lmChat9Router": {
171
+ "className": "LmChat9Router",
172
+ "sourcePath": "dist/nodes/llms/LmChat9Router/LmChat9Router.node.js"
173
+ },
162
174
  "lmChatOpenRouter": {
163
175
  "className": "LmChatOpenRouter",
164
176
  "sourcePath": "dist/nodes/llms/LmChatOpenRouter/LmChatOpenRouter.node.js"
@@ -0,0 +1,10 @@
1
+ <svg width="40" height="40" viewBox="0 0 40 40" fill="none" xmlns="http://www.w3.org/2000/svg">
2
+ <rect width="40" height="40" rx="8" fill="url(#gradient_dark)"/>
3
+ <text x="20" y="30" font-family="system-ui, -apple-system, sans-serif" font-size="26" font-weight="700" fill="white" text-anchor="middle">9</text>
4
+ <defs>
5
+ <linearGradient id="gradient_dark" x1="0" y1="0" x2="40" y2="40" gradientUnits="userSpaceOnUse">
6
+ <stop stop-color="#fb923c"/>
7
+ <stop offset="1" stop-color="#ea6a12"/>
8
+ </linearGradient>
9
+ </defs>
10
+ </svg>
@@ -0,0 +1,10 @@
1
+ <svg width="40" height="40" viewBox="0 0 40 40" fill="none" xmlns="http://www.w3.org/2000/svg">
2
+ <rect width="40" height="40" rx="8" fill="url(#gradient_light)"/>
3
+ <text x="20" y="30" font-family="system-ui, -apple-system, sans-serif" font-size="26" font-weight="700" fill="white" text-anchor="middle">9</text>
4
+ <defs>
5
+ <linearGradient id="gradient_light" x1="0" y1="0" x2="40" y2="40" gradientUnits="userSpaceOnUse">
6
+ <stop stop-color="#f97815"/>
7
+ <stop offset="1" stop-color="#c2590a"/>
8
+ </linearGradient>
9
+ </defs>
10
+ </svg>
@@ -0,0 +1,248 @@
1
"use strict";
// esbuild-generated CommonJS interop prelude (standard boilerplate for bundled output).
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Defines every entry of `all` on `target` as an enumerable getter (live binding).
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copies own properties of `from` onto `to` as getters, skipping `except` and keys
// already present on `to`; enumerability is preserved from the source descriptor.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Wraps the export object with an `__esModule` marker so ESM consumers interop cleanly.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
var LmChat9Router_node_exports = {};
__export(LmChat9Router_node_exports, {
  LmChat9Router: () => LmChat9Router
});
module.exports = __toCommonJS(LmChat9Router_node_exports);
// Runtime dependencies; the relative paths resolve to sibling modules inside
// this package's compiled dist/ tree.
var import_openai = require("@langchain/openai");
var import_n8n_workflow = require("n8n-workflow");
var import_httpProxyAgent = require("../../../utils/httpProxyAgent");
var import_sharedFields = require("../../../utils/sharedFields");
var import_error_handling = require("../../vendors/OpenAi/helpers/error-handling");
var import_n8nLlmFailedAttemptHandler = require("../n8nLlmFailedAttemptHandler");
var import_N8nLlmTracing = require("../N8nLlmTracing");
31
// n8n language-model sub-node that exposes a 9Router (OpenAI-compatible)
// endpoint to chains/agents via LangChain's ChatOpenAI client.
class LmChat9Router {
  constructor() {
    // Static node description consumed by the n8n editor UI.
    this.description = {
      displayName: "9Router Chat Model",
      name: "lmChat9Router",
      icon: { light: "file:9router.svg", dark: "file:9router.dark.svg" },
      group: ["transform"],
      version: [1],
      description: "For advanced usage with an AI chain",
      defaults: {
        name: "9Router Chat Model"
      },
      codex: {
        categories: ["AI"],
        subcategories: {
          AI: ["Language Models", "Root Nodes"],
          "Language Models": ["Chat Models (Recommended)"]
        },
        resources: {
          primaryDocumentation: [
            {
              url: "https://github.com/9router/9router"
            }
          ]
        }
      },
      // Sub-node: no main inputs; supplies an AiLanguageModel connection instead.
      inputs: [],
      outputs: [import_n8n_workflow.NodeConnectionTypes.AiLanguageModel],
      outputNames: ["Model"],
      credentials: [
        {
          name: "nineRouterApi",
          required: true
        }
      ],
      // Base URL for the declarative model-list request below comes from the credential.
      requestDefaults: {
        ignoreHttpStatusErrors: true,
        baseURL: "={{ $credentials?.url }}"
      },
      properties: [
        // Standard hint shown when the node is not connected to a chain/agent.
        (0, import_sharedFields.getConnectionHintNoticeField)([import_n8n_workflow.NodeConnectionTypes.AiChain, import_n8n_workflow.NodeConnectionTypes.AiAgent]),
        {
          displayName: 'If using JSON response format, you must include word "json" in the prompt in your chain or agent. Also, make sure to select latest models released post November 2023.',
          name: "notice",
          type: "notice",
          default: "",
          // Only shown when the JSON response format option is selected.
          displayOptions: {
            show: {
              "/options.responseFormat": ["json_object"]
            }
          }
        },
        {
          displayName: "Model",
          name: "model",
          type: "options",
          // NOTE(review): "Learn more" links to http://localhost:20128 — a hardcoded
          // local default; confirm this is intended for published documentation.
          description: 'The model which will generate the completion. <a href="http://localhost:20128">Learn more</a>.',
          typeOptions: {
            // Dropdown is populated dynamically: GET {credential url}/models,
            // unwrap the `data` array, map each entry's id to name/value, sort by name.
            loadOptions: {
              routing: {
                request: {
                  method: "GET",
                  url: "/models"
                },
                output: {
                  postReceive: [
                    {
                      type: "rootProperty",
                      properties: {
                        property: "data"
                      }
                    },
                    {
                      type: "setKeyValue",
                      properties: {
                        name: "={{$responseItem.id}}",
                        value: "={{$responseItem.id}}"
                      }
                    },
                    {
                      type: "sort",
                      properties: {
                        key: "name"
                      }
                    }
                  ]
                }
              }
            }
          },
          // Selected model id is sent in the request body under `model`.
          routing: {
            send: {
              type: "body",
              property: "model"
            }
          },
          default: "openai/gpt-4o"
        },
        {
          displayName: "Options",
          name: "options",
          placeholder: "Add Option",
          description: "Additional options to add",
          type: "collection",
          default: {},
          options: [
            {
              displayName: "Frequency Penalty",
              name: "frequencyPenalty",
              default: 0,
              typeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },
              description: "Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim",
              type: "number"
            },
            {
              displayName: "Maximum Number of Tokens",
              name: "maxTokens",
              // -1 means "no explicit limit" here — TODO confirm the backend treats it that way.
              default: -1,
              description: "The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).",
              type: "number",
              typeOptions: {
                maxValue: 32768
              }
            },
            {
              displayName: "Response Format",
              name: "responseFormat",
              default: "text",
              type: "options",
              options: [
                {
                  name: "Text",
                  value: "text",
                  description: "Regular text response"
                },
                {
                  name: "JSON",
                  value: "json_object",
                  description: "Enables JSON mode, which should guarantee the message the model generates is valid JSON"
                }
              ]
            },
            {
              displayName: "Presence Penalty",
              name: "presencePenalty",
              default: 0,
              typeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },
              description: "Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics",
              type: "number"
            },
            {
              displayName: "Sampling Temperature",
              name: "temperature",
              default: 0.7,
              typeOptions: { maxValue: 2, minValue: 0, numberPrecision: 1 },
              description: "Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.",
              type: "number"
            },
            {
              displayName: "Timeout",
              name: "timeout",
              default: 36e4,
              description: "Maximum amount of time a request is allowed to take in milliseconds",
              type: "number"
            },
            {
              displayName: "Max Retries",
              name: "maxRetries",
              default: 2,
              description: "Maximum number of retries to attempt",
              type: "number"
            },
            {
              displayName: "Top P",
              name: "topP",
              default: 1,
              typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
              description: "Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.",
              type: "number"
            }
          ]
        }
      ]
    };
  }
  // Builds and returns the configured ChatOpenAI instance for downstream
  // chains/agents (n8n SupplyData contract). `itemIndex` selects which input
  // item's parameter values to resolve.
  async supplyData(itemIndex) {
    const credentials = await this.getCredentials("nineRouterApi");
    const modelName = this.getNodeParameter("model", itemIndex);
    const options = this.getNodeParameter("options", itemIndex, {});
    const configuration = {
      baseURL: credentials.url,
      fetchOptions: {
        // Route requests through a proxy agent derived from the target URL.
        dispatcher: (0, import_httpProxyAgent.getProxyAgent)(credentials.url)
      }
    };
    const model = new import_openai.ChatOpenAI({
      // The credential key is optional; "no-key" satisfies the client when it is unset.
      apiKey: credentials.apiKey || "no-key",
      model: modelName,
      ...options,
      // NOTE(review): fallback is 6e4 (60s) but the "Timeout" option's UI default
      // above is 36e4 (360s) — confirm the mismatch is intentional.
      timeout: options.timeout ?? 6e4,
      maxRetries: options.maxRetries ?? 2,
      configuration,
      // Wire n8n's LLM tracing into LangChain callbacks.
      callbacks: [new import_N8nLlmTracing.N8nLlmTracing(this)],
      // Forward the response-format choice (text/json_object) when one was picked.
      modelKwargs: options.responseFormat ? {
        response_format: { type: options.responseFormat }
      } : void 0,
      onFailedAttempt: (0, import_n8nLlmFailedAttemptHandler.makeN8nLlmFailedAttemptHandler)(this, import_error_handling.openAiFailedAttemptHandler)
    });
    return {
      response: model
    };
  }
}
244
// Annotate the CommonJS export names for ESM import in node:
// (dead code by design — the `0 &&` guard means it never runs, but it lets
// Node's cjs-module-lexer statically detect the named export `LmChat9Router`)
0 && (module.exports = {
  LmChat9Router
});
//# sourceMappingURL=LmChat9Router.node.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../../../../nodes/llms/LmChat9Router/LmChat9Router.node.ts"],"sourcesContent":["import { ChatOpenAI, type ClientOptions } from '@langchain/openai';\nimport {\n\tNodeConnectionTypes,\n\ttype INodeType,\n\ttype INodeTypeDescription,\n\ttype ISupplyDataFunctions,\n\ttype SupplyData,\n} from 'n8n-workflow';\n\nimport { getProxyAgent } from '@utils/httpProxyAgent';\nimport { getConnectionHintNoticeField } from '@utils/sharedFields';\n\nimport type { OpenAICompatibleCredential } from '../../../types/types';\nimport { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';\nimport { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';\nimport { N8nLlmTracing } from '../N8nLlmTracing';\n\nexport class LmChat9Router implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: '9Router Chat Model',\n\t\tname: 'lmChat9Router',\n\t\ticon: { light: 'file:9router.svg', dark: 'file:9router.dark.svg' },\n\t\tgroup: ['transform'],\n\t\tversion: [1],\n\t\tdescription: 'For advanced usage with an AI chain',\n\t\tdefaults: {\n\t\t\tname: '9Router Chat Model',\n\t\t},\n\t\tcodex: {\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Language Models', 'Root Nodes'],\n\t\t\t\t'Language Models': ['Chat Models (Recommended)'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://github.com/9router/9router',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\n\t\tinputs: [],\n\n\t\toutputs: [NodeConnectionTypes.AiLanguageModel],\n\t\toutputNames: ['Model'],\n\t\tcredentials: [\n\t\t\t{\n\t\t\t\tname: 'nineRouterApi',\n\t\t\t\trequired: true,\n\t\t\t},\n\t\t],\n\t\trequestDefaults: {\n\t\t\tignoreHttpStatusErrors: true,\n\t\t\tbaseURL: '={{ $credentials?.url }}',\n\t\t},\n\t\tproperties: [\n\t\t\tgetConnectionHintNoticeField([NodeConnectionTypes.AiChain, NodeConnectionTypes.AiAgent]),\n\t\t\t{\n\t\t\t\tdisplayName:\n\t\t\t\t\t'If using JSON 
response format, you must include word \"json\" in the prompt in your chain or agent. Also, make sure to select latest models released post November 2023.',\n\t\t\t\tname: 'notice',\n\t\t\t\ttype: 'notice',\n\t\t\t\tdefault: '',\n\t\t\t\tdisplayOptions: {\n\t\t\t\t\tshow: {\n\t\t\t\t\t\t'/options.responseFormat': ['json_object'],\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Model',\n\t\t\t\tname: 'model',\n\t\t\t\ttype: 'options',\n\t\t\t\tdescription:\n\t\t\t\t\t'The model which will generate the completion. <a href=\"http://localhost:20128\">Learn more</a>.',\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tloadOptions: {\n\t\t\t\t\t\trouting: {\n\t\t\t\t\t\t\trequest: {\n\t\t\t\t\t\t\t\tmethod: 'GET',\n\t\t\t\t\t\t\t\turl: '/models',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\toutput: {\n\t\t\t\t\t\t\t\tpostReceive: [\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'rootProperty',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tproperty: 'data',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'setKeyValue',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tname: '={{$responseItem.id}}',\n\t\t\t\t\t\t\t\t\t\t\tvalue: '={{$responseItem.id}}',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'sort',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tkey: 'name',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trouting: {\n\t\t\t\t\tsend: {\n\t\t\t\t\t\ttype: 'body',\n\t\t\t\t\t\tproperty: 'model',\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tdefault: 'openai/gpt-4o',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Options',\n\t\t\t\tname: 'options',\n\t\t\t\tplaceholder: 'Add Option',\n\t\t\t\tdescription: 'Additional options to add',\n\t\t\t\ttype: 'collection',\n\t\t\t\tdefault: {},\n\t\t\t\toptions: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Frequency Penalty',\n\t\t\t\t\t\tname: 
'frequencyPenalty',\n\t\t\t\t\t\tdefault: 0,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim\",\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Maximum Number of Tokens',\n\t\t\t\t\t\tname: 'maxTokens',\n\t\t\t\t\t\tdefault: -1,\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t\ttypeOptions: {\n\t\t\t\t\t\t\tmaxValue: 32768,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Response Format',\n\t\t\t\t\t\tname: 'responseFormat',\n\t\t\t\t\t\tdefault: 'text',\n\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\toptions: [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Text',\n\t\t\t\t\t\t\t\tvalue: 'text',\n\t\t\t\t\t\t\t\tdescription: 'Regular text response',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'JSON',\n\t\t\t\t\t\t\t\tvalue: 'json_object',\n\t\t\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\t\t'Enables JSON mode, which should guarantee the message the model generates is valid JSON',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t],\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Presence Penalty',\n\t\t\t\t\t\tname: 'presencePenalty',\n\t\t\t\t\t\tdefault: 0,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics\",\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Sampling Temperature',\n\t\t\t\t\t\tname: 'temperature',\n\t\t\t\t\t\tdefault: 
0.7,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: 0, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Timeout',\n\t\t\t\t\t\tname: 'timeout',\n\t\t\t\t\t\tdefault: 360000,\n\t\t\t\t\t\tdescription: 'Maximum amount of time a request is allowed to take in milliseconds',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Max Retries',\n\t\t\t\t\t\tname: 'maxRetries',\n\t\t\t\t\t\tdefault: 2,\n\t\t\t\t\t\tdescription: 'Maximum number of retries to attempt',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Top P',\n\t\t\t\t\t\tname: 'topP',\n\t\t\t\t\t\tdefault: 1,\n\t\t\t\t\t\ttypeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. 
We generally recommend altering this or temperature but not both.',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t],\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tconst credentials = await this.getCredentials<OpenAICompatibleCredential>('nineRouterApi');\n\n\t\tconst modelName = this.getNodeParameter('model', itemIndex) as string;\n\n\t\tconst options = this.getNodeParameter('options', itemIndex, {}) as {\n\t\t\tfrequencyPenalty?: number;\n\t\t\tmaxTokens?: number;\n\t\t\tmaxRetries: number;\n\t\t\ttimeout: number;\n\t\t\tpresencePenalty?: number;\n\t\t\ttemperature?: number;\n\t\t\ttopP?: number;\n\t\t\tresponseFormat?: 'text' | 'json_object';\n\t\t};\n\n\t\tconst configuration: ClientOptions = {\n\t\t\tbaseURL: credentials.url,\n\t\t\tfetchOptions: {\n\t\t\t\tdispatcher: getProxyAgent(credentials.url),\n\t\t\t},\n\t\t};\n\n\t\tconst model = new ChatOpenAI({\n\t\t\tapiKey: credentials.apiKey || 'no-key',\n\t\t\tmodel: modelName,\n\t\t\t...options,\n\t\t\ttimeout: options.timeout ?? 60000,\n\t\t\tmaxRetries: options.maxRetries ?? 2,\n\t\t\tconfiguration,\n\t\t\tcallbacks: [new N8nLlmTracing(this)],\n\t\t\tmodelKwargs: options.responseFormat\n\t\t\t\t? 
{\n\t\t\t\t\t\tresponse_format: { type: options.responseFormat },\n\t\t\t\t\t}\n\t\t\t\t: undefined,\n\t\t\tonFailedAttempt: makeN8nLlmFailedAttemptHandler(this, openAiFailedAttemptHandler),\n\t\t});\n\n\t\treturn {\n\t\t\tresponse: model,\n\t\t};\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,oBAA+C;AAC/C,0BAMO;AAEP,4BAA8B;AAC9B,0BAA6C;AAG7C,4BAA2C;AAC3C,wCAA+C;AAC/C,2BAA8B;AAEvB,MAAM,cAAmC;AAAA,EAAzC;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA,MACb,MAAM;AAAA,MACN,MAAM,EAAE,OAAO,oBAAoB,MAAM,wBAAwB;AAAA,MACjE,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS,CAAC,CAAC;AAAA,MACX,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,mBAAmB,YAAY;AAAA,UACpC,mBAAmB,CAAC,2BAA2B;AAAA,QAChD;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MAEA,QAAQ,CAAC;AAAA,MAET,SAAS,CAAC,wCAAoB,eAAe;AAAA,MAC7C,aAAa,CAAC,OAAO;AAAA,MACrB,aAAa;AAAA,QACZ;AAAA,UACC,MAAM;AAAA,UACN,UAAU;AAAA,QACX;AAAA,MACD;AAAA,MACA,iBAAiB;AAAA,QAChB,wBAAwB;AAAA,QACxB,SAAS;AAAA,MACV;AAAA,MACA,YAAY;AAAA,YACX,kDAA6B,CAAC,wCAAoB,SAAS,wCAAoB,OAAO,CAAC;AAAA,QACvF;AAAA,UACC,aACC;AAAA,UACD,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,UACT,gBAAgB;AAAA,YACf,MAAM;AAAA,cACL,2BAA2B,CAAC,aAAa;AAAA,YAC1C;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,aACC;AAAA,UACD,aAAa;AAAA,YACZ,aAAa;AAAA,cACZ,SAAS;AAAA,gBACR,SAAS;AAAA,kBACR,QAAQ;AAAA,kBACR,KAAK;AAAA,gBACN;AAAA,gBACA,QAAQ;AAAA,kBACP,aAAa;AAAA,oBACZ;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,UAAU;AAAA,sBACX;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,MAAM;AAAA,wBACN,OAAO;AAAA,sBACR;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,KAAK;AAAA,sBACN;AAAA,oBACD;AAAA,kBACD;AAAA,gBACD;AAAA,cACD;AAAA,YACD;AAAA,UACD;AAAA,UACA,SAAS;AAAA,YACR,MAAM;AAAA,cACL,MAAM;AAAA,cACN,UAAU;AAAA,YACX;AAAA,UACD;AAAA,UACA,SAAS;AAAA,QACV;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,aAAa;AAAA,UACb,aAAa;AAAA,UACb,MAAM
;AAAA,UACN,SAAS,CAAC;AAAA,UACV,SAAS;AAAA,YACR;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,IAAI,iBAAiB,EAAE;AAAA,cAC7D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aACC;AAAA,cACD,MAAM;AAAA,cACN,aAAa;AAAA,gBACZ,UAAU;AAAA,cACX;AAAA,YACD;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,MAAM;AAAA,cACN,SAAS;AAAA,gBACR;AAAA,kBACC,MAAM;AAAA,kBACN,OAAO;AAAA,kBACP,aAAa;AAAA,gBACd;AAAA,gBACA;AAAA,kBACC,MAAM;AAAA,kBACN,OAAO;AAAA,kBACP,aACC;AAAA,gBACF;AAAA,cACD;AAAA,YACD;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,IAAI,iBAAiB,EAAE;AAAA,cAC7D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,cAC5D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa;AAAA,cACb,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa;AAAA,cACb,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,cAC5D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA;AAAA,EAEA,MAAM,WAAuC,WAAwC;AACpF,UAAM,cAAc,MAAM,KAAK,eAA2C,eAAe;AAEzF,UAAM,YAAY,KAAK,iBAAiB,SAAS,SAAS;AAE1D,UAAM,UAAU,KAAK,iBAAiB,WAAW,WAAW,CAAC,CAAC;AAW9D,UAAM,gBAA+B;AAAA,MACpC,SAAS,YAAY;AAAA,MACrB,cAAc;AAAA,QACb,gBAAY,qCAAc,YAAY,GAAG;AAAA,MAC1C;AAAA,IACD;AAEA,UAAM,QAAQ,IAAI,yBAAW;AAAA,MAC5B,QAAQ,YAAY,UAAU;AAAA,MAC9B,OAAO;AAAA,MACP,GAAG;AAAA,MACH,SAAS,QAAQ,WAAW;AAAA,MAC5B,YAAY,QAAQ,cAAc;AAAA,MAClC;AAAA,MACA,WAAW,CAAC,IAAI,mCAAc,IAAI,CAAC;AAAA,MACnC,aAAa,QAAQ,iBAClB;AAAA,QACA,iBAAiB,EAAE,MAAM,QAAQ,eAAe;AAAA,MACjD,IACC;AAAA,MACH,qBAAiB,kEAA+B,MAAM,gDAA0B;AAAA,IACjF,CAAC;AAED,WAAO;AAAA,MACN,UAAU;AAAA,IACX;AAAA,EACD;AACD;","names":[]}