n8n-nodes-github-copilot 3.38.25 → 3.38.26
This diff compares the contents of two publicly available package versions as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
- package/dist/credentials/GitHubCopilotApi.credentials.d.ts +1 -1
- package/dist/credentials/GitHubCopilotApi.credentials.js +25 -25
- package/dist/nodes/GitHubCopilot/GitHubCopilot.node.d.ts +1 -1
- package/dist/nodes/GitHubCopilot/GitHubCopilot.node.js +166 -166
- package/dist/nodes/GitHubCopilotAuthHelper/GitHubCopilotAuthHelper.node.d.ts +1 -1
- package/dist/nodes/GitHubCopilotAuthHelper/GitHubCopilotAuthHelper.node.js +539 -539
- package/dist/nodes/GitHubCopilotChatAPI/GitHubCopilotChatAPI.node.d.ts +1 -1
- package/dist/nodes/GitHubCopilotChatAPI/GitHubCopilotChatAPI.node.js +46 -44
- package/dist/nodes/GitHubCopilotChatAPI/nodeProperties.d.ts +1 -1
- package/dist/nodes/GitHubCopilotChatAPI/nodeProperties.js +82 -82
- package/dist/nodes/GitHubCopilotChatAPI/utils/helpers.d.ts +2 -2
- package/dist/nodes/GitHubCopilotChatAPI/utils/helpers.js +26 -26
- package/dist/nodes/GitHubCopilotChatAPI/utils/imageProcessor.d.ts +2 -2
- package/dist/nodes/GitHubCopilotChatAPI/utils/imageProcessor.js +12 -12
- package/dist/nodes/GitHubCopilotChatAPI/utils/index.d.ts +4 -4
- package/dist/nodes/GitHubCopilotChatAPI/utils/mediaDetection.d.ts +3 -3
- package/dist/nodes/GitHubCopilotChatAPI/utils/mediaDetection.js +19 -19
- package/dist/nodes/GitHubCopilotChatAPI/utils/modelCapabilities.d.ts +1 -1
- package/dist/nodes/GitHubCopilotChatAPI/utils/modelCapabilities.js +23 -23
- package/dist/nodes/GitHubCopilotChatAPI/utils/types.d.ts +5 -5
- package/dist/nodes/GitHubCopilotChatModel/GitHubCopilotChatModel.node.d.ts +1 -1
- package/dist/nodes/GitHubCopilotChatModel/GitHubCopilotChatModel.node.js +115 -106
- package/dist/nodes/GitHubCopilotEmbeddings/GitHubCopilotEmbeddings.node.d.ts +1 -1
- package/dist/nodes/GitHubCopilotEmbeddings/GitHubCopilotEmbeddings.node.js +114 -114
- package/dist/nodes/GitHubCopilotOpenAI/GitHubCopilotOpenAI.node.d.ts +1 -1
- package/dist/nodes/GitHubCopilotOpenAI/GitHubCopilotOpenAI.node.js +74 -69
- package/dist/nodes/GitHubCopilotOpenAI/nodeProperties.d.ts +1 -1
- package/dist/nodes/GitHubCopilotOpenAI/nodeProperties.js +181 -181
- package/dist/nodes/GitHubCopilotOpenAI/utils/index.d.ts +2 -2
- package/dist/nodes/GitHubCopilotOpenAI/utils/openaiCompat.d.ts +10 -10
- package/dist/nodes/GitHubCopilotOpenAI/utils/openaiCompat.js +53 -53
- package/dist/nodes/GitHubCopilotOpenAI/utils/types.d.ts +12 -12
- package/dist/nodes/GitHubCopilotTest/GitHubCopilotTest.node.d.ts +1 -1
- package/dist/nodes/GitHubCopilotTest/GitHubCopilotTest.node.js +120 -116
- package/dist/package.json +1 -1
- package/package.json +1 -1

@@ -5,50 +5,50 @@ const ModelProperties_1 = require("../../shared/properties/ModelProperties");
 exports.nodeProperties = [
     ...ModelProperties_1.CHAT_MODEL_PROPERTIES,
     {
-        displayName:
-        name:
-        type:
+        displayName: 'Messages Input Mode',
+        name: 'messagesInputMode',
+        type: 'options',
         options: [
             {
-                name:
-                value:
-                description:
+                name: 'Manual (UI)',
+                value: 'manual',
+                description: 'Enter messages one by one using the UI',
             },
             {
-                name:
-                value:
-                description:
+                name: 'JSON (Programmatic)',
+                value: 'json',
+                description: 'Provide messages as JSON array',
             },
         ],
-        default:
-        description:
+        default: 'manual',
+        description: 'How to provide the messages for the conversation',
     },
     {
-        displayName:
-        name:
-        type:
-        default: `[
-  {
-    "role": "system",
-    "content": "You are a helpful assistant."
-  },
-  {
-    "role": "user",
-    "content": "Hello!"
-  }
+        displayName: 'Messages (JSON)',
+        name: 'messagesJson',
+        type: 'json',
+        default: `[
+  {
+    "role": "system",
+    "content": "You are a helpful assistant."
+  },
+  {
+    "role": "user",
+    "content": "Hello!"
+  }
 ]`,
-        placeholder:
-        description:
+        placeholder: 'Enter messages as JSON array',
+        description: 'Array of messages in OpenAI format: [{"role": "user", "content": "..."}]',
         displayOptions: {
             show: {
-                messagesInputMode: [
+                messagesInputMode: ['json'],
             },
         },
     },
     {
-        displayName:
-        name:
-        type:
+        displayName: 'Messages',
+        name: 'messages',
+        type: 'fixedCollection',
         typeOptions: {
             multipleValues: true,
             sortable: true,
@@ -56,269 +56,269 @@ exports.nodeProperties = [
         default: {
             message: [
                 {
-                    role:
-                    content:
+                    role: 'user',
+                    content: '',
                 },
             ],
         },
         displayOptions: {
             show: {
-                messagesInputMode: [
+                messagesInputMode: ['manual'],
             },
         },
         options: [
             {
-                name:
-                displayName:
+                name: 'message',
+                displayName: 'Message',
                 values: [
                     {
-                        displayName:
-                        name:
-                        type:
+                        displayName: 'Role',
+                        name: 'role',
+                        type: 'options',
                         options: [
                             {
-                                name:
-                                value:
-                                description:
+                                name: 'System',
+                                value: 'system',
+                                description: 'System message to set the behavior of the AI',
                             },
                             {
-                                name:
-                                value:
-                                description:
+                                name: 'User',
+                                value: 'user',
+                                description: 'Message from the user',
                             },
                             {
-                                name:
-                                value:
-                                description:
+                                name: 'Assistant',
+                                value: 'assistant',
+                                description: 'Previous response from the AI assistant',
                             },
                         ],
-                        default:
+                        default: 'user',
                     },
                     {
-                        displayName:
-                        name:
-                        type:
+                        displayName: 'Content',
+                        name: 'content',
+                        type: 'string',
                         typeOptions: {
                             rows: 3,
                         },
-                        default:
-                        placeholder:
-                        description:
+                        default: '',
+                        placeholder: 'Enter message content...',
+                        description: 'The content of the message',
                     },
                     {
-                        displayName:
-                        name:
-                        type:
+                        displayName: 'Type',
+                        name: 'type',
+                        type: 'options',
                         options: [
                             {
-                                name:
-                                value:
-                                description:
+                                name: 'Text',
+                                value: 'text',
+                                description: 'Regular text message',
                             },
                             {
-                                name:
-                                value:
-                                description:
+                                name: 'File',
+                                value: 'file',
+                                description: 'File attachment (use content as data URL or base64)',
                             },
                         ],
-                        default:
-                        description:
+                        default: 'text',
+                        description: 'The type of message content (optional)',
                     },
                 ],
             },
         ],
-        description:
+        description: 'Array of messages for the conversation',
     },
     {
-        displayName:
-        name:
-        type:
-        placeholder:
+        displayName: 'Advanced Options',
+        name: 'advancedOptions',
+        type: 'collection',
+        placeholder: 'Add Advanced Option',
         default: {},
         options: [
             {
-                displayName:
-                name:
-                type:
+                displayName: 'Response Format',
+                name: 'response_format',
+                type: 'options',
                 options: [
                     {
-                        name:
-                        value:
-                        description:
+                        name: 'Text',
+                        value: 'text',
+                        description: 'Return response as plain text',
                     },
                     {
-                        name:
-                        value:
-                        description:
+                        name: 'JSON Object',
+                        value: 'json_object',
+                        description: 'Return response as JSON object',
                     },
                 ],
-                default:
-                description:
+                default: 'text',
+                description: 'The format of the response',
             },
             {
-                displayName:
-                name:
-                type:
+                displayName: 'Temperature',
+                name: 'temperature',
+                type: 'number',
                 typeOptions: {
                     minValue: 0,
                     maxValue: 2,
                     numberPrecision: 2,
                 },
                 default: 1,
-                description:
+                description: 'Controls randomness in the response. Lower values make responses more focused and deterministic.',
             },
             {
-                displayName:
-                name:
-                type:
+                displayName: 'Max Tokens',
+                name: 'max_tokens',
+                type: 'number',
                 typeOptions: {
                     minValue: 1,
                     maxValue: 16384,
                 },
                 default: 4096,
-                placeholder:
-                description:
-                hint:
+                placeholder: '4096',
+                description: 'Maximum number of tokens to generate in the response',
+                hint: 'Default: 4096 tokens. Increase for longer responses, decrease for shorter ones.',
             },
             {
-                displayName:
-                name:
-                type:
+                displayName: 'Top P',
+                name: 'top_p',
+                type: 'number',
                 typeOptions: {
                     minValue: 0,
                     maxValue: 1,
                     numberPrecision: 2,
                 },
                 default: 1,
-                description:
+                description: 'Controls diversity via nucleus sampling',
             },
             {
-                displayName:
-                name:
-                type:
+                displayName: 'Frequency Penalty',
+                name: 'frequency_penalty',
+                type: 'number',
                 typeOptions: {
                     minValue: -2,
                     maxValue: 2,
                     numberPrecision: 2,
                 },
                 default: 0,
-                description:
+                description: 'Penalty for repeated tokens based on their frequency',
             },
             {
-                displayName:
-                name:
-                type:
+                displayName: 'Presence Penalty',
+                name: 'presence_penalty',
+                type: 'number',
                 typeOptions: {
                     minValue: -2,
                     maxValue: 2,
                     numberPrecision: 2,
                 },
                 default: 0,
-                description:
+                description: 'Penalty for repeated tokens based on their presence',
             },
             {
-                displayName:
-                name:
-                type:
-                default:
-                placeholder:
-                description:
+                displayName: 'Stop Sequences',
+                name: 'stop',
+                type: 'string',
+                default: '',
+                placeholder: '["\\n", "Human:", "AI:"]',
+                description: 'JSON array of strings where the API will stop generating tokens',
             },
             {
-                displayName:
-                name:
-                type:
+                displayName: 'Stream',
+                name: 'stream',
+                type: 'boolean',
                 default: false,
-                description:
+                description: 'Whether to stream the response',
             },
             {
-                displayName:
-                name:
-                type:
+                displayName: 'Seed',
+                name: 'seed',
+                type: 'number',
                 default: 0,
-                placeholder:
-                description:
+                placeholder: '12345',
+                description: 'Seed for deterministic sampling (0 = disabled)',
             },
             {
-                displayName:
-                name:
-                type:
-                default:
-                placeholder:
-                description:
+                displayName: 'User ID',
+                name: 'user',
+                type: 'string',
+                default: '',
+                placeholder: 'user-123',
+                description: 'Unique identifier for the end-user',
             },
             {
-                displayName:
-                name:
-                type:
-                default:
+                displayName: 'Tools (Function Calling)',
+                name: 'tools',
+                type: 'string',
+                default: '',
                 typeOptions: {
                     rows: 10,
                 },
-                placeholder: `[
-  {
-    "type": "function",
-    "function": {
-      "name": "get_weather",
-      "description": "Get current weather",
-      "parameters": {
-        "type": "object",
-        "properties": {
-          "location": {
-            "type": "string",
-            "description": "City name"
-          }
-        },
-        "required": ["location"]
-      }
-    }
-  }
+                placeholder: `[
+  {
+    "type": "function",
+    "function": {
+      "name": "get_weather",
+      "description": "Get current weather",
+      "parameters": {
+        "type": "object",
+        "properties": {
+          "location": {
+            "type": "string",
+            "description": "City name"
+          }
+        },
+        "required": ["location"]
+      }
+    }
+  }
 ]`,
-                description:
+                description: 'Optional: Array of tools/functions available to the model (OpenAI format). Leave empty if not using function calling.',
                 hint: "JSON array of tool definitions in OpenAI format. Leave this field empty if you don't need function calling.",
             },
             {
-                displayName:
-                name:
-                type:
+                displayName: 'Tool Choice',
+                name: 'tool_choice',
+                type: 'options',
                 options: [
                     {
-                        name:
-                        value:
-                        description:
+                        name: 'Auto',
+                        value: 'auto',
+                        description: 'Let the model decide whether to call functions',
                     },
                     {
-                        name:
-                        value:
-                        description:
+                        name: 'None',
+                        value: 'none',
+                        description: 'Force the model to not call any functions',
                     },
                     {
-                        name:
-                        value:
-                        description:
+                        name: 'Required',
+                        value: 'required',
+                        description: 'Force the model to call at least one function',
                     },
                 ],
-                default:
-                description:
+                default: 'auto',
+                description: 'Control how the model uses tools',
                 displayOptions: {
                     show: {
-                        tools: [
+                        tools: ['/.+/'],
                     },
                 },
             },
             {
-                displayName:
-                name:
-                type:
+                displayName: 'Enable Retry',
+                name: 'enableRetry',
+                type: 'boolean',
                 default: true,
-                description:
+                description: 'Whether to retry failed requests',
             },
             {
-                displayName:
-                name:
-                type:
+                displayName: 'Max Retries',
+                name: 'maxRetries',
+                type: 'number',
                 default: 3,
-                description:
+                description: 'Maximum number of retries for failed requests',
                 displayOptions: {
                     show: {
                         enableRetry: [true],
@@ -326,11 +326,11 @@ exports.nodeProperties = [
                 },
             },
             {
-                displayName:
-                name:
-                type:
+                displayName: 'Retry Delay (ms)',
+                name: 'retryDelay',
+                type: 'number',
                 default: 1000,
-                description:
+                description: 'Delay between retries in milliseconds',
                 displayOptions: {
                     show: {
                         enableRetry: [true],
@@ -338,18 +338,18 @@ exports.nodeProperties = [
                 },
             },
             {
-                displayName:
-                name:
-                type:
+                displayName: 'Request Timeout (ms)',
+                name: 'timeout',
+                type: 'number',
                 default: 60000,
-                description:
+                description: 'Request timeout in milliseconds',
             },
             {
-                displayName:
-                name:
-                type:
+                displayName: 'Debug Mode',
+                name: 'debugMode',
+                type: 'boolean',
                 default: false,
-                description:
+                description: 'Enable debug logging',
             },
         ],
     },
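
The hunks above only touch the compiled property declarations (labels, option values, defaults, and displayOptions). For orientation, here is a minimal sketch of how a node's execute() context could read the messagesInputMode, messagesJson, messages, and advancedOptions parameters declared above; the helper names and the exact parameter handling are illustrative assumptions, not code taken from this package.

// Hypothetical sketch (not the package's actual implementation): reading the
// properties declared above from inside an n8n node's execute() context.
import type { IDataObject, IExecuteFunctions } from 'n8n-workflow';

// Assumed message shape for this sketch, mirroring the Role/Content/Type fields.
interface ChatMessage {
    role: string;
    content: string;
    type?: string;
}

// Resolve the conversation according to 'Messages Input Mode'.
function collectMessages(ctx: IExecuteFunctions, itemIndex: number): ChatMessage[] {
    const mode = ctx.getNodeParameter('messagesInputMode', itemIndex, 'manual') as string;

    if (mode === 'json') {
        // JSON (Programmatic) mode: the 'messagesJson' field holds an OpenAI-style array.
        const raw = ctx.getNodeParameter('messagesJson', itemIndex, '[]') as string;
        return JSON.parse(raw) as ChatMessage[];
    }

    // Manual (UI) mode: fixedCollection with one 'message' entry per row.
    const collection = ctx.getNodeParameter('messages', itemIndex, {}) as {
        message?: ChatMessage[];
    };
    return collection.message ?? [];
}

// Optional OpenAI-style parameters (temperature, max_tokens, stop, tools, ...).
function collectAdvancedOptions(ctx: IExecuteFunctions, itemIndex: number): IDataObject {
    return ctx.getNodeParameter('advancedOptions', itemIndex, {}) as IDataObject;
}
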
@@ -1,2 +1,2 @@
-export * from
-export { mapOpenAIModelToCopilot, convertOpenAIMessagesToCopilot, convertCopilotResponseToOpenAI, parseOpenAIRequest, debugLog, } from
+export * from './types';
+export { mapOpenAIModelToCopilot, convertOpenAIMessagesToCopilot, convertCopilotResponseToOpenAI, parseOpenAIRequest, debugLog, } from './openaiCompat';
@@ -1,6 +1,6 @@
-import { IDataObject, IExecuteFunctions } from
+import { IDataObject, IExecuteFunctions } from 'n8n-workflow';
 export interface OpenAIMessage {
-    role:
+    role: 'system' | 'user' | 'assistant' | 'tool';
     content: string;
     name?: string;
     tool_calls?: ToolCall[];
@@ -8,14 +8,14 @@ export interface OpenAIMessage {
 }
 export interface ToolCall {
     id: string;
-    type:
+    type: 'function';
     function: {
         name: string;
         arguments: string;
     };
 }
 export interface OpenAITool {
-    type:
+    type: 'function';
     function: {
         name: string;
         description: string;
@@ -26,14 +26,14 @@ export interface OpenAIRequest {
     model: string;
     messages: OpenAIMessage[];
     tools?: OpenAITool[];
-    tool_choice?:
-        type:
+    tool_choice?: 'auto' | 'none' | 'required' | {
+        type: 'function';
         function: {
             name: string;
         };
     };
     response_format?: {
-        type:
+        type: 'text' | 'json_object';
     };
     temperature?: number;
     max_tokens?: number;
@@ -47,17 +47,17 @@ export interface OpenAIRequest {
 }
 export interface OpenAIResponse {
     id: string;
-    object:
+    object: 'chat.completion';
     created: number;
     model: string;
     choices: Array<{
         index: number;
         message: {
-            role:
+            role: 'assistant';
             content: string | null;
             tool_calls?: ToolCall[];
         };
-        finish_reason:
+        finish_reason: 'stop' | 'length' | 'tool_calls' | 'content_filter';
     }>;
     usage: {
         prompt_tokens: number;
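
The types.d.ts hunks above describe the OpenAI-compatible request and response shapes this package works with. As a rough illustration, the sketch below builds a request and reads a response using local interfaces trimmed to the fields visible in these hunks; the model ID is an illustrative value, and the weather tool is taken from the Tools placeholder shown earlier.

// Self-contained sketch of values matching the OpenAIRequest / OpenAIResponse
// shapes shown above. Interfaces are re-declared locally and trimmed to the
// fields visible in this diff, so the snippet compiles on its own.
interface OpenAIMessage {
    role: 'system' | 'user' | 'assistant' | 'tool';
    content: string;
    name?: string;
}

interface OpenAITool {
    type: 'function';
    function: { name: string; description: string };
}

interface OpenAIRequest {
    model: string;
    messages: OpenAIMessage[];
    tools?: OpenAITool[];
    tool_choice?: 'auto' | 'none' | 'required' | { type: 'function'; function: { name: string } };
    response_format?: { type: 'text' | 'json_object' };
    temperature?: number;
    max_tokens?: number;
}

interface OpenAIResponse {
    id: string;
    object: 'chat.completion';
    created: number;
    model: string;
    choices: Array<{
        index: number;
        message: { role: 'assistant'; content: string | null };
        finish_reason: 'stop' | 'length' | 'tool_calls' | 'content_filter';
    }>;
    usage: { prompt_tokens: number };
}

// Example request mirroring the node's defaults (temperature 1, max_tokens 4096).
const request: OpenAIRequest = {
    model: 'gpt-4o', // illustrative model ID only
    messages: [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: 'Hello!' },
    ],
    tools: [
        { type: 'function', function: { name: 'get_weather', description: 'Get current weather' } },
    ],
    tool_choice: 'auto',
    response_format: { type: 'text' },
    temperature: 1,
    max_tokens: 4096,
};

// The assistant text lives on choices[0].message.content; when the model calls
// a tool instead, finish_reason is 'tool_calls' and content is null.
function firstReply(response: OpenAIResponse): string | null {
    return response.choices[0]?.message.content ?? null;
}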