@cognigy/rest-api-client 2025.12.0 → 2025.14.0

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
Files changed (75)
  1. package/CHANGELOG.md +10 -0
  2. package/build/apigroups/ResourcesAPIGroup_2_0.js +8 -1
  3. package/build/apigroups/SimulationAPIGroup_2_0.js +4 -1
  4. package/build/shared/charts/descriptors/analytics/trackGoal.js +3 -1
  5. package/build/shared/charts/descriptors/connectionNodes/internalStorageProviders/amazonStorageProviderConnection.js +7 -1
  6. package/build/shared/charts/descriptors/index.js +5 -0
  7. package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +10 -6
  8. package/build/shared/charts/descriptors/message/question/question.js +12 -1
  9. package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +11 -3
  10. package/build/shared/charts/descriptors/service/GPTPrompt.js +21 -1
  11. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +71 -175
  12. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobTool.js +2 -2
  13. package/build/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +175 -0
  14. package/build/shared/charts/descriptors/service/aiAgent/loadAiAgent.js +194 -0
  15. package/build/shared/charts/descriptors/service/handoverV2.js +1 -1
  16. package/build/shared/charts/descriptors/service/index.js +11 -1
  17. package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +959 -0
  18. package/build/shared/charts/descriptors/service/llmPrompt/llmPromptDefault.js +31 -0
  19. package/build/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +196 -0
  20. package/build/shared/charts/descriptors/service/llmPrompt/llmPromptTool.js +139 -0
  21. package/build/shared/constants.js +1 -5
  22. package/build/shared/interfaces/debugEvents/IGoalCompletedEventPayload.js +3 -0
  23. package/build/shared/interfaces/debugEvents/TDebugEventType.js +1 -0
  24. package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +12 -1
  25. package/build/shared/interfaces/messageAPI/handover.js +6 -0
  26. package/build/shared/interfaces/resources/ISimulation.js +9 -0
  27. package/build/shared/interfaces/resources/TResourceType.js +3 -0
  28. package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeChunk.js +2 -1
  29. package/build/shared/interfaces/resources/settings/IGenerativeAISettings.js +5 -18
  30. package/build/shared/interfaces/restAPI/operations/generateOutput/v2.0/index.js +3 -0
  31. package/build/shared/interfaces/restAPI/resources/chart/v2.0/IReadFlowChartAiAgentsRest_2_0.js +3 -0
  32. package/build/shared/interfaces/restAPI/simulation/persona/IGeneratePersonaRest_2_0.js +3 -0
  33. package/build/shared/interfaces/restAPI/simulation/persona/IGetPersonaOptionsRest_2_0.js +3 -0
  34. package/build/shared/interfaces/restAPI/simulation/persona/IRegeneratePersonaFieldRest_2_0.js +3 -0
  35. package/build/shared/interfaces/security/IPermission.js +4 -0
  36. package/build/shared/interfaces/security/IRole.js +5 -1
  37. package/build/shared/interfaces/security/index.js +1 -1
  38. package/dist/esm/apigroups/ResourcesAPIGroup_2_0.js +8 -1
  39. package/dist/esm/apigroups/SimulationAPIGroup_2_0.js +4 -1
  40. package/dist/esm/shared/charts/descriptors/analytics/trackGoal.js +3 -1
  41. package/dist/esm/shared/charts/descriptors/connectionNodes/internalStorageProviders/amazonStorageProviderConnection.js +7 -1
  42. package/dist/esm/shared/charts/descriptors/index.js +6 -1
  43. package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +10 -6
  44. package/dist/esm/shared/charts/descriptors/message/question/question.js +12 -1
  45. package/dist/esm/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +11 -3
  46. package/dist/esm/shared/charts/descriptors/service/GPTPrompt.js +21 -1
  47. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +72 -176
  48. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobTool.js +2 -2
  49. package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +172 -0
  50. package/dist/esm/shared/charts/descriptors/service/aiAgent/loadAiAgent.js +192 -0
  51. package/dist/esm/shared/charts/descriptors/service/handoverV2.js +1 -1
  52. package/dist/esm/shared/charts/descriptors/service/index.js +5 -0
  53. package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +946 -0
  54. package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptDefault.js +28 -0
  55. package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +193 -0
  56. package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptTool.js +136 -0
  57. package/dist/esm/shared/constants.js +1 -5
  58. package/dist/esm/shared/interfaces/debugEvents/IGoalCompletedEventPayload.js +2 -0
  59. package/dist/esm/shared/interfaces/debugEvents/TDebugEventType.js +1 -0
  60. package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +12 -1
  61. package/dist/esm/shared/interfaces/messageAPI/handover.js +6 -0
  62. package/dist/esm/shared/interfaces/resources/ISimulation.js +6 -0
  63. package/dist/esm/shared/interfaces/resources/TResourceType.js +3 -0
  64. package/dist/esm/shared/interfaces/resources/knowledgeStore/IKnowledgeChunk.js +2 -1
  65. package/dist/esm/shared/interfaces/resources/settings/IGenerativeAISettings.js +4 -17
  66. package/dist/esm/shared/interfaces/restAPI/operations/generateOutput/v2.0/index.js +2 -0
  67. package/dist/esm/shared/interfaces/restAPI/resources/chart/v2.0/IReadFlowChartAiAgentsRest_2_0.js +2 -0
  68. package/dist/esm/shared/interfaces/restAPI/simulation/persona/IGeneratePersonaRest_2_0.js +2 -0
  69. package/dist/esm/shared/interfaces/restAPI/simulation/persona/IGetPersonaOptionsRest_2_0.js +2 -0
  70. package/dist/esm/shared/interfaces/restAPI/simulation/persona/IRegeneratePersonaFieldRest_2_0.js +2 -0
  71. package/dist/esm/shared/interfaces/security/IPermission.js +4 -0
  72. package/dist/esm/shared/interfaces/security/IRole.js +5 -1
  73. package/dist/esm/shared/interfaces/security/index.js +1 -1
  74. package/package.json +1 -1
  75. package/types/index.d.ts +2093 -1927
package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js
@@ -0,0 +1,959 @@
+ "use strict";
+ var __rest = (this && this.__rest) || function (s, e) {
+ var t = {};
+ for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)
+ t[p] = s[p];
+ if (s != null && typeof Object.getOwnPropertySymbols === "function")
+ for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {
+ if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))
+ t[p[i]] = s[p[i]];
+ }
+ return t;
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.LLM_PROMPT_V2 = void 0;
+ /* Custom modules */
+ const createNodeDescriptor_1 = require("../../../createNodeDescriptor");
+ const logic_1 = require("../../logic");
+ const crypto_1 = require("crypto");
+ const createToolDefinitions_1 = require("../aiAgent/helpers/createToolDefinitions");
+ const prompt_1 = require("../../nlu/generativeSlotFiller/prompt");
+ const errors_1 = require("../../../../errors");
+ const transcripts_1 = require("../../../../interfaces/transcripts/transcripts");
+ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
+ type: "llmPromptV2",
+ defaultLabel: "LLM Prompt",
+ summary: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__DESCRIPTION",
+ constraints: {
+ collapsable: true,
+ placement: {
+ children: {
+ whitelist: ["llmPromptDefault", "llmPromptTool", "llmPromptMCPTool"],
+ },
+ },
+ },
+ behavior: {
+ entrypoint: true
+ },
+ dependencies: {
+ children: ["llmPromptDefault", "llmPromptTool"]
+ },
+ preview: {
+ type: "text",
+ key: "prompt",
+ },
+ fields: [
+ {
+ key: "llmProviderReferenceId",
+ type: "llmSelect",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__LLM_SELECT__LABEL",
+ defaultValue: "default",
+ params: {
+ required: true
+ }
+ },
+ {
+ key: "prompt",
+ label: "UI__NODE_EDITOR__SERVICE__LLM_PROMPT__FIELDS__PROMPT__LABEL",
+ type: "cognigyLLMText",
+ description: "UI__NODE_EDITOR__SERVICE__LLM_PROMPT__FIELDS__PROMPT__DESCRIPTION",
+ params: {
+ multiline: true,
+ rows: 5,
+ required: false
+ },
+ defaultValue: ""
+ },
+ {
+ key: "chatTranscriptSteps",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TRANSCRIPT_STEPS__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TRANSCRIPT_STEPS__DESCRIPTION",
+ defaultValue: 50,
+ params: {
+ min: 0,
+ max: 50,
+ step: 1
+ },
+ condition: {
+ key: "usePromptMode",
+ value: false,
+ }
+ },
+ {
+ key: "usePromptMode",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__USE_PROMPT__LABEL",
+ type: "toggle",
+ params: {
+ required: true
+ },
+ defaultValue: false
+ },
+ {
+ key: "samplingMethod",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__SAMPLING_METHOD__LABEL",
+ type: "select",
+ defaultValue: "temperature",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__SAMPLING_METHOD__OPTIONS__TEMPERATURE__LABEL",
+ value: "temperature"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__SAMPLING_METHOD__OPTIONS__TOP_PERCENTATGE__LABEL",
+ value: "topP"
+ }
+ ]
+ }
+ },
+ {
+ key: "temperature",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TEMPERATURE__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TEMPERATURE__DESCRIPTION",
+ defaultValue: 0.7,
+ params: {
+ min: 0,
+ max: 1,
+ step: 0.1
+ },
+ condition: {
+ key: "samplingMethod",
+ value: "temperature",
+ }
+ },
+ {
+ key: "topP",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TOP_P__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TOP_P__DESCRIPTION",
+ defaultValue: 1,
+ params: {
+ min: 0,
+ max: 1,
+ step: 0.1
+ },
+ condition: {
+ key: "samplingMethod",
+ value: "topP",
+ }
+ },
+ {
+ key: "maxTokens",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__MAX_TOKENS__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__MAX_TOKENS__DESCRIPTION",
+ defaultValue: 1000,
+ params: {
+ min: 100,
+ max: 16000,
+ step: 100
+ }
+ },
+ {
+ key: "frequencyPenalty",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__FREQUENCY_PENALTY__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__FREQUENCY_PENALTY__DESCRIPTION",
+ defaultValue: 0,
+ params: {
+ min: -2,
+ max: 2,
+ step: 0.1
+ }
+ },
+ {
+ key: "presencePenalty",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__PRESENCE_PENALTY__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__PRESENCE_PENALTY__DESCRIPTION",
+ defaultValue: 0,
+ params: {
+ min: -2,
+ max: 2,
+ step: 0.1
+ }
+ },
+ {
+ key: "useStop",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__USE_STOP__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__USE_STOP__DESCRIPTION",
+ defaultValue: false
+ },
+ {
+ key: "stop",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STOP__LABEL",
+ type: "textArray",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STOP__DESCRIPTION",
+ condition: {
+ key: "useStop",
+ value: true
+ }
+ },
+ {
+ key: "timeout",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TIMEOUT__LABEL",
+ defaultValue: 8000,
+ type: "number",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TIMEOUT__DESCRIPTION",
+ },
+ {
+ key: "storeLocation",
+ type: "select",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__DESCRIPTION",
+ defaultValue: "stream",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__OPTIONS__INPUT__LABEL",
+ value: "input"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__OPTIONS__CONTEXT__LABEL",
+ value: "context"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__OPTIONS__STREAM__LABEL",
+ value: "stream"
+ }
+ ],
+ required: true
+ },
+ },
+ {
+ key: "immediateOutput",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__IMMEDIATEOUTPUT__LABEL",
+ type: "toggle",
+ defaultValue: true,
+ condition: {
+ or: [
+ {
+ key: "storeLocation",
+ value: "input",
+ },
+ {
+ key: "storeLocation",
+ value: "context",
+ }
+ ]
+ }
+ },
+ {
+ key: "inputKey",
+ type: "cognigyText",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__INPUT_KEY__LABEL",
+ defaultValue: "promptResult",
+ condition: {
+ or: [
+ {
+ key: "storeLocation",
+ value: "input",
+ },
+ {
+ and: [
+ {
+ key: "storeLocation",
+ value: "stream",
+ },
+ {
+ key: "streamStoreCopyInInput",
+ value: true,
+ }
+ ]
+ }
+ ]
+ }
+ },
+ {
+ key: "contextKey",
+ type: "cognigyText",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CONTEXT_KEY__LABEL",
+ defaultValue: "promptResult",
+ condition: {
+ key: "storeLocation",
+ value: "context",
+ }
+ },
+ {
+ key: "streamStopTokens",
+ type: "textArray",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STOP_TOKENS__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STOP_TOKENS__DESCRIPTION",
+ defaultValue: [".", "!", "?", "\\n"],
+ condition: {
+ key: "storeLocation",
+ value: "stream",
+ }
+ },
+ {
+ key: "streamStopTokenOverrides",
+ type: "textArray",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STOP_TOKEN_OVERRIDES__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STOP_TOKEN_OVERRIDES__DESCRIPTION",
+ defaultValue: ["\d+\."],
+ condition: {
+ key: "storeLocation",
+ value: "stream",
+ }
+ },
+ {
+ key: "streamDescription",
+ type: "description",
+ label: " ",
+ params: {
+ text: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_DESCRIPTION__TEXT"
+ },
+ condition: {
+ key: "storeLocation",
+ value: "stream",
+ }
+ },
+ {
+ key: "streamStoreCopyInInput",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STORE_COPY__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STORE_COPY__DESCRIPTION",
+ defaultValue: false,
+ condition: {
+ key: "storeLocation",
+ value: "stream",
+ }
+ },
+ {
+ key: "debugLogTokenCount",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUGLOGTOKENCOUNT__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUGLOGTOKENCOUNT__DESCRIPTION",
+ defaultValue: false
+ },
+ {
+ key: "debugLogRequestAndCompletion",
+ label: "UI__NODE_EDITOR__SERVICE__LLM_PROMPT__FIELDS__LOG_SYSTEM_PROMPT_AND_COMPLETION__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__LLM_PROMPT__FIELDS__LOG_SYSTEM_PROMPT_AND_COMPLETION__DESCRIPTION",
+ defaultValue: false
+ },
+ {
+ key: "debugLogLLMLatency",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_LLM_LATENCY__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_LLM_LATENCY__DESCRIPTION",
+ defaultValue: false
+ },
+ {
+ key: "debugDescription",
+ type: "description",
+ label: " ",
+ params: {
+ text: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUG_DESCRIPTION__TEXT"
+ }
+ },
+ {
+ key: "responseFormat",
+ type: "select",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__RESPONSE_FORMAT__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__RESPONSE_FORMAT__DESCRIPTION",
+ defaultValue: "default",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__RESPONSE_FORMAT__OPTIONS__DEFAULT__LABEL",
+ value: "default"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__RESPONSE_FORMAT__OPTIONS__TEXT__LABEL",
+ value: "text"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__RESPONSE_FORMAT__OPTIONS__JSON__LABEL",
+ value: "json_object"
+ }
+ ]
+ },
+ },
+ {
+ key: "seed",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__SEED__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__SEED__DESCRIPTION",
+ type: "cognigyText",
+ defaultValue: ""
+ },
+ {
+ key: "jsonStreamWarning",
+ type: "description",
+ label: " ",
+ params: {
+ text: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__JSONSTREAMWARNING__PARAM"
+ },
+ condition: {
+ and: [
+ {
+ key: "responseFormat",
+ value: "json_object"
+ },
+ {
+ key: "storeLocation",
+ value: "stream",
+ }
+ ]
+ }
+ },
+ {
+ key: "customModelOptions",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_MODEL_OPTIONS__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_MODEL_OPTIONS__DESCRIPTION",
+ type: "json",
+ defaultValue: {}
+ },
+ {
+ key: "customRequestOptions",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_REQUEST_OPTIONS__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_REQUEST_OPTIONS__DESCRIPTION",
+ type: "json",
+ defaultValue: {}
+ },
+ {
+ key: "logErrorToSystem",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__LOG_ERROR_TO_SYSTEM__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__LOG_ERROR_TO_SYSTEM__DESCRIPTION",
+ type: "toggle",
+ defaultValue: false,
+ },
+ {
+ key: "errorHandling",
+ type: "select",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__HANDLE_SERVICE_ERROR__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__HANDLE_SERVICE_ERROR__DESCRIPTION",
+ defaultValue: "continue",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__HANDLE_SERVICE_ERROR__OPTIONS__STOP__LABEL",
+ value: "stop"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__HANDLE_SERVICE_ERROR__OPTIONS__CONTINUE__LABEL",
+ value: "continue"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__HANDLE_SERVICE_ERROR__OPTIONS__GOTO__LABEL",
+ value: "goto"
+ },
+ ]
+ }
+ },
+ {
+ key: "errorMessage",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__ERROR_MESSAGE__LABEL",
+ type: "cognigyText",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__ERROR_MESSAGE__DESCRIPTION",
+ defaultValue: "",
+ condition: {
+ key: "errorHandling",
+ value: "continue"
+ }
+ },
+ {
+ key: "errorHandlingGotoTarget",
+ type: "flowNode",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__ERROR__GOTO_NODE__LABEL",
+ condition: {
+ key: "errorHandling",
+ value: "goto"
+ }
+ },
+ {
+ key: "toolChoice",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TOOL_CHOICE__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TOOL_CHOICE__DESCRIPTION",
+ type: "select",
+ defaultValue: "auto",
+ params: {
+ options: [
+ {
+ label: 'UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TOOL_CHOICE__OPTIONS__AUTO__LABEL',
+ value: 'auto'
+ },
+ {
+ label: 'UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TOOL_CHOICE__OPTIONS__REQUIRED__LABEL',
+ value: 'required'
+ },
+ {
+ label: 'UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TOOL_CHOICE__OPTIONS__NONE__LABEL',
+ value: 'none'
+ }
+ ]
+ }
+ },
+ {
+ key: "useStrict",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__USE_STRICT__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__USE_STRICT__DESCRIPTION",
+ type: "toggle",
+ defaultValue: false
+ },
+ {
+ key: "processImages",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__PROCESS_IMAGES__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__PROCESS_IMAGES__DESCRIPTION",
+ type: "toggle",
+ defaultValue: false
+ },
+ {
+ key: "transcriptImageHandling",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TRANSCRIPT_IMAGES__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TRANSCRIPT_IMAGES__DESCRIPTION",
+ type: "select",
+ defaultValue: "minify",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TRANSCRIPT_IMAGES__OPTIONS__MINIFY__LABEL",
+ value: "minify"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TRANSCRIPT_IMAGES__OPTIONS__DROP__LABEL",
+ value: "drop"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TRANSCRIPT_IMAGES__OPTIONS__KEEP__LABEL",
+ value: "keep"
+ }
+ ],
+ },
+ },
+ {
+ key: "debugLogToolDefinitions",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_TOOL_DEFINITIONS__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_TOOL_DEFINITIONS__DESCRIPTION",
+ defaultValue: false
+ },
+ ],
+ sections: [
+ {
+ key: "advanced",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__SECTIONS__ADVANCED__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "maxTokens",
+ "usePromptMode",
+ "chatTranscriptSteps",
+ "responseFormat",
+ "jsonStreamWarning",
+ "timeout",
+ "samplingMethod",
+ "temperature",
+ "topP",
+ "presencePenalty",
+ "frequencyPenalty",
+ "useStop",
+ "stop",
+ "seed"
+ ]
+ },
+ {
+ key: "storage",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__SECTIONS__STORAGE__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "storeLocation",
+ "jsonStreamWarning",
+ "streamDescription",
+ "inputKey",
+ "contextKey",
+ "immediateOutput",
+ "streamStopTokens",
+ "streamStopTokenOverrides",
+ "streamStoreCopyInInput"
+ ]
+ },
+ {
+ key: "errorHandling",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__SECTIONS__ERROR_HANDLING__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "logErrorToSystem",
+ "errorHandling",
+ "errorMessage",
+ "errorHandlingGotoTarget",
+ ]
+ },
+ {
+ key: "customOptions",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__SECTIONS__CUSTOM_OPTIONS__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "customModelOptions",
+ "customRequestOptions"
+ ]
+ },
+ {
+ key: "debugging",
+ label: "UI__NODE_EDITOR__SECTIONS__DEBUG_SETTINGS__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "debugDescription",
+ "debugLogTokenCount",
+ "debugLogRequestAndCompletion",
+ "debugLogLLMLatency",
+ "debugLogToolDefinitions"
+ ]
+ },
+ {
+ key: "toolSettings",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__TOOL_SETTINGS__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "toolChoice",
+ "useStrict",
+ ],
+ },
+ {
+ key: "imageHandling",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__IMAGE_HANDLING__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "processImages",
+ "transcriptImageHandling"
+ ]
+ },
+ ],
+ form: [
+ { type: "field", key: "llmProviderReferenceId" },
+ { type: "field", key: "prompt" },
+ { type: "section", key: "advanced" },
+ { type: "section", key: "storage" },
+ { type: "section", key: "toolSettings" },
+ { type: "section", key: "imageHandling" },
+ { type: "section", key: "errorHandling" },
+ { type: "section", key: "customOptions" },
+ { type: "section", key: "debugging" },
+ ],
+ appearance: {
+ color: "#252525",
+ },
+ tags: ["ai", "llm", "gpt", "generative ai", "openai", "azure", "prompt"],
+ function: async ({ cognigy, config, childConfigs, nodeId }) => {
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v;
+ const { api, input, flowReferenceId } = cognigy;
+ const { temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, streamStopTokens, streamStopTokenOverrides, debugLogTokenCount, debugLogRequestAndCompletion, debugLogLLMLatency, debugLogToolDefinitions, llmProviderReferenceId, usePromptMode, chatTranscriptSteps, responseFormat, streamStoreCopyInInput, seed, immediateOutput, customModelOptions, customRequestOptions, errorHandling = "continue", // default behavior for LLM Prompt node was, continue its execution even though an error occurred (deviating it from the SEO node) & do not output an error message on UI explicitly. However, error is always stored in the input or context object. We can use an extra "say" node to output it.
+ errorHandlingGotoTarget, errorMessage, logErrorToSystem, processImages, transcriptImageHandling, toolChoice, useStrict } = config;
+ let prompt = config.prompt || "";
+ const { traceId } = input;
+ // check if custom variables are used and if they have a length modifier
+ // works only for a single variable per prompt
+ if (prompt.includes("@cognigyRecentConversation")) {
+ let turnLimit;
+ if (prompt.match(/@cognigyRecentConversation:(\d+)/)) {
+ // @cognigyRecentConversation has a length modifier (e.g. @cognigyRecentConversation:5), so we just want to return the top 5 turns
+ turnLimit = Number((_a = prompt.match(/@cognigyRecentConversation:(\d+)/)) === null || _a === void 0 ? void 0 : _a[1]);
+ }
+ const recentConversation = (0, prompt_1.createLastConverationString)(cognigy.lastConversationEntries, turnLimit) + "\n";
+ prompt = prompt.replace(/@cognigyRecentConversation(:\d+)?/, recentConversation);
+ }
+ if (prompt.includes("@cognigyRecentUserInputs")) {
+ let turnLimit;
+ if (prompt.match(/@cognigyRecentUserInputs:(\d+)/)) {
+ // @cognigyRecentUserInputs has a length modifier (e.g. @cognigyRecentUserInputs:5), so we just want to return the top 5 entries
+ turnLimit = Number((_b = prompt.match(/@cognigyRecentUserInputs:(\d+)/)) === null || _b === void 0 ? void 0 : _b[1]);
+ }
+ const recentUserInputs = (0, prompt_1.createLastUserInputString)(cognigy.lastConversationEntries, turnLimit) + "\n";
+ prompt = prompt.replace(/@cognigyRecentUserInputs(:\d+)?/, recentUserInputs);
+ }
+ // handle errors from external services, depending on the settings
+ const handleServiceError = async (error) => {
+ var _a, _b, _c, _d, _e, _f;
+ const compactError = {
+ name: error === null || error === void 0 ? void 0 : error.name,
+ code: error === null || error === void 0 ? void 0 : error.code,
+ message: (error === null || error === void 0 ? void 0 : error.message) || error
+ };
+ // return the requestId if it exist in the error obj.
+ if ((_a = error === null || error === void 0 ? void 0 : error.meta) === null || _a === void 0 ? void 0 : _a.requestId) {
+ compactError["requestId"] = (_b = error === null || error === void 0 ? void 0 : error.meta) === null || _b === void 0 ? void 0 : _b.requestId;
+ }
+ if ((_c = error === null || error === void 0 ? void 0 : error.originalErrorDetails) === null || _c === void 0 ? void 0 : _c.code) {
+ compactError.code = error.originalErrorDetails.code;
+ }
+ const errorResponse = {
+ error: compactError,
+ };
+ // add error to context or input
+ switch (storeLocation) {
+ case "context":
+ (_d = api.addToContext) === null || _d === void 0 ? void 0 : _d.call(api, contextKey, errorResponse, "simple");
+ break;
+ default:
+ api.addToInput(inputKey, errorResponse);
+ }
+ if (errorHandling === "continue") {
+ // output the timeout message
+ if (errorMessage) {
+ await ((_e = api.output) === null || _e === void 0 ? void 0 : _e.call(api, errorMessage, null));
+ }
+ // Continue with default node as next node
+ const defaultChild = childConfigs === null || childConfigs === void 0 ? void 0 : childConfigs.find(child => child.type === "llmPromptDefault");
+ if (defaultChild) {
+ api.setNextNode(defaultChild.id);
+ }
+ }
+ else if (errorHandling === "goto") {
+ if (!errorHandlingGotoTarget) {
+ throw new Error("GoTo Target is required");
+ }
+ const gotoParams = {
+ cognigy,
+ childConfigs: [],
+ nodeId,
+ config: {
+ flowNode: {
+ flow: errorHandlingGotoTarget.flow,
+ node: errorHandlingGotoTarget.node,
+ },
+ injectedText: input.text,
+ injectedData: input.data,
+ executionMode: "continue",
+ absorbContext: false
+ }
+ };
+ await ((_f = logic_1.GO_TO.function) === null || _f === void 0 ? void 0 : _f.call(logic_1.GO_TO, gotoParams));
+ }
+ else {
+ throw new errors_1.InternalServerError(error === null || error === void 0 ? void 0 : error.message, { traceId });
+ }
+ };
+ try {
+ const isStreamingChannel = input.channel === "webchat3" || input.channel === "adminconsole";
+ const _messageId = (0, crypto_1.randomUUID)();
+ // Start measuring LLM latency and time to first output if debug flag is enabled
+ let firstOutputTime = null;
+ /**
+ * Retrieve the tool definitions from the child nodes
+ */
+ const { toolIds, toolNames, toolMap, tools } = await (0, createToolDefinitions_1.createToolDefinitions)(childConfigs, api, useStrict);
+ /**
+ * Generate Prompt Options
+ */
+ const llmPromptOptions = Object.assign(Object.assign(Object.assign({ prompt,
+ temperature,
+ maxTokens,
+ topP,
+ presencePenalty,
+ frequencyPenalty, timeoutInMs: timeout, useCase: "promptNode", stream: storeLocation === "stream", streamOnDataHandler: (text) => {
+ var _a;
+ text = isStreamingChannel ? text : text.trim();
+ if (text) {
+ // Record first output time for debugging if not already recorded
+ if (debugLogLLMLatency && firstOutputTime === null) {
+ firstOutputTime = Date.now();
+ }
+ // if we got text, we output it, but prevent it from being added to the transcript
+ (_a = api.output) === null || _a === void 0 ? void 0 : _a.call(api, text, {
+ _cognigy: {
+ _preventTranscript: true,
+ _messageId,
+ }
+ });
+ }
+ }, streamStopTokens: streamStopTokens || [".", "!", "?", "\\n"], streamStopTokenOverrides, preventNewLineRemoval: isStreamingChannel ? true : false,
+ // set to true in order to get token usage
+ detailedResults: true, seed: Number(seed) ? Number(seed) : undefined }, (tools.length > 0 && { tools })), (tools.length > 0 && { toolChoice: toolChoice })), { customModelOptions,
+ customRequestOptions });
+ if (useStop) {
+ llmPromptOptions["stop"] = stop;
+ }
+ // llmProviderReferenceId `default` value is not a responseFormat, rather it is LLM Model default selection.
+ if (llmProviderReferenceId && llmProviderReferenceId !== "default") {
+ llmPromptOptions["llmProviderReferenceId"] = llmProviderReferenceId;
+ }
+ if (processImages) {
+ llmPromptOptions["imageOptions"] = {
+ processImages,
+ transcriptImageHandling
+ };
+ }
+ if (responseFormat && responseFormat !== "default") {
+ llmPromptOptions["responseFormat"] = responseFormat;
+ }
+ let debugPrompt = prompt;
+ // if we're not using prompt mode, we need to add the system message and the transcript
+ // this is the equivalent of the old "useChat" mode
+ if (!usePromptMode) {
+ const transcript = await api.getTranscript({
+ limit: chatTranscriptSteps || 50,
+ rolesWhiteList: [transcripts_1.TranscriptRole.USER, transcripts_1.TranscriptRole.ASSISTANT, transcripts_1.TranscriptRole.TOOL],
+ excludeDataOnlyMessagesFilter: [transcripts_1.TranscriptRole.ASSISTANT]
+ });
+ llmPromptOptions["transcript"] = transcript;
+ llmPromptOptions["chat"] = [{
+ role: "system",
+ content: prompt
+ }];
+ }
+ const llmStartTime = debugLogLLMLatency ? Date.now() : 0;
+ // Run the LLM Query
+ const fullLlmResult = await api.runGenerativeAIPrompt(llmPromptOptions, "gptPromptNode");
+ // End measuring times and log if debug flag is enabled
+ if (debugLogLLMLatency) {
+ const llmEndTime = Date.now();
+ const debugMessages = [];
+ const llmLatencyMs = llmEndTime - llmStartTime;
+ let timeToFirstOutputLabel;
+ if (fullLlmResult.finishReason === "tool_calls" && fullLlmResult.toolCalls.length > 0) {
+ timeToFirstOutputLabel = " - (tool call)";
+ }
+ else if (firstOutputTime === null) {
+ timeToFirstOutputLabel = " - (no output)";
+ }
+ else {
+ firstOutputTime = firstOutputTime || llmEndTime;
+ timeToFirstOutputLabel = `${firstOutputTime - llmStartTime}ms`;
+ }
+ if (storeLocation === "stream") {
+ debugMessages.push(`UI__DEBUG_MODE__AI_AGENT_JOB__TIME_TO_FIRST_OUTPUT__LABEL: ${timeToFirstOutputLabel}`);
+ }
+ debugMessages.push(`UI__DEBUG_MODE__AI_AGENT_JOB__LLM_LATENCY__LABEL: ${llmLatencyMs}ms`);
+ (_c = api.logDebugMessage) === null || _c === void 0 ? void 0 : _c.call(api, debugMessages.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TIMING__HEADER");
+ }
+ const { messages } = fullLlmResult, llmResult = __rest(fullLlmResult, ["messages"]);
+ const isFollowSessionActive = api.getMetadata().isFollowSessionActive;
+ if (debugLogToolDefinitions) {
+ (_d = api.logDebugMessage) === null || _d === void 0 ? void 0 : _d.call(api, tools, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_DEFINITIONS");
+ }
+ // if we're in adminconsole or following a session, process debugging options
+ (input.endpointType === "adminconsole" || isFollowSessionActive) && (0, prompt_1.writeLLMDebugLogs)("LLM Prompt", debugPrompt, llmResult, debugLogTokenCount, debugLogRequestAndCompletion, cognigy, "llmPromptV2");
+ if (llmResult.finishReason === "tool_calls" && llmResult.toolCalls.length > 0) {
+ const mainToolCall = llmResult.toolCalls[0];
+ let isMcpToolCall = false;
+ // Find the child node with the toolId of the tool call
+ let toolChild = childConfigs.find(child => { var _a, _b; return child.type === "llmPromptTool" && ((_a = child.config) === null || _a === void 0 ? void 0 : _a.toolId) && api.parseCognigyScriptText((_b = child.config) === null || _b === void 0 ? void 0 : _b.toolId) === mainToolCall.function.name; });
+ if (!toolChild && toolMap.has(mainToolCall.function.name)) {
+ // If the tool call is from an MCP tool, set the next node to the corresponding child node
+ toolChild = childConfigs.find(child => child.id === toolMap.get(mainToolCall.function.name));
+ isMcpToolCall = true;
+ }
+ if (mainToolCall.function.name !== "retrieve_knowledge" && toolChild === undefined) {
+ (_e = api.logDebugError) === null || _e === void 0 ? void 0 : _e.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
+ }
+ // Add last tool call to session state for loading it from Tool Answer Node
+ api.updateSessionStateValues({
+ lastToolCall: Object.assign(Object.assign({ llmProvider: fullLlmResult.llmProvider, aiAgentJobNode: {
+ flow: flowReferenceId,
+ node: nodeId,
+ } }, (isMcpToolCall && {
+ mcpServerUrl: (_f = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _f === void 0 ? void 0 : _f.mcpServerUrl,
+ timeout: (_g = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _g === void 0 ? void 0 : _g.timeout,
+ mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
+ })), { toolCall: mainToolCall }),
+ });
+ // if there are any parameters/arguments, add them to the input slots
+ if (mainToolCall.function.arguments) {
+ input.llmPrompt = Object.assign(Object.assign({}, input.llmPrompt), { toolArgs: Object.assign(Object.assign({}, (_j = (_h = input.llmPrompt) === null || _h === void 0 ? void 0 : _h.toolArgs) !== null && _j !== void 0 ? _j : {}), mainToolCall.function.arguments) });
+ }
+ // Debug Message for Tool Calls, configured in the Tool Node
+ if ((_k = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _k === void 0 ? void 0 : _k.debugMessage) {
+ const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${api.parseCognigyScriptText(toolChild.config.toolId)}`];
+ // Arguments / Parameters Slots
+ const slots = ((_l = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _l === void 0 ? void 0 : _l.arguments) && Object.keys(mainToolCall.function.arguments);
+ const hasSlots = slots && slots.length > 0;
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
+ if (hasSlots) {
+ slots.forEach(slot => {
+ let slotValueAsString = mainToolCall.function.arguments[slot];
+ if (typeof slotValueAsString === "object" && slotValueAsString !== null) {
+ slotValueAsString = JSON.stringify(slotValueAsString, null, 2);
+ }
+ else {
+ String(slotValueAsString);
+ }
+ messageLines.push(`- ${slot}: ${slotValueAsString}`);
+ });
+ }
+ (_m = api.logDebugMessage) === null || _m === void 0 ? void 0 : _m.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
+ }
+ if (toolChild) {
+ api.setNextNode(toolChild.id);
+ }
+ else {
+ const defaultChild = childConfigs === null || childConfigs === void 0 ? void 0 : childConfigs.find(child => child.type === "llmPromptDefault");
+ if (defaultChild) {
+ api.setNextNode(defaultChild.id);
+ }
+ }
+ }
+ else {
+ // Default case
+ const defaultChild = childConfigs === null || childConfigs === void 0 ? void 0 : childConfigs.find(child => child.type === "llmPromptDefault");
+ if (defaultChild) {
+ api.setNextNode(defaultChild.id);
+ }
+ }
+ // Optionally output the result immediately
+ // This will also store it into the output
+ if (llmResult.result && immediateOutput && !llmPromptOptions.stream) {
+ // we stringify objects (e.g. results coming from JSON Mode)
+ // so that the transcript only contains text
+ const resultToOutput = typeof ((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult) === "object" ? JSON.stringify((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult, undefined, 2) : (llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult;
+ await ((_o = api.output) === null || _o === void 0 ? void 0 : _o.call(api, resultToOutput, {}));
+ }
+ else if (llmResult.finishReason && llmPromptOptions.stream) {
+ // send the finishReason as last output for a stream
+ (_p = api.output) === null || _p === void 0 ? void 0 : _p.call(api, "", {
+ _cognigy: {
+ _preventTranscript: true,
+ _messageId,
+ _finishReason: llmResult.finishReason,
+ }
+ });
+ }
+ // If we are streaming and we got a result, also store it into the transcript, since streamed chunks are not stored there
+ if (llmResult.result && llmPromptOptions.stream) {
+ const transcriptContent = {
+ role: transcripts_1.TranscriptRole.ASSISTANT,
+ type: transcripts_1.TranscriptEntryType.OUTPUT,
+ source: "assistant",
+ payload: {
+ text: ((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult),
+ data: {},
+ }
+ };
+ await api.addTranscriptStep(transcriptContent);
+ }
+ // Add response to Cognigy Input/Context for further usage
+ if (storeLocation === "context") {
+ (_q = api.addToContext) === null || _q === void 0 ? void 0 : _q.call(api, contextKey, llmResult, "simple");
+ }
+ else if (storeLocation === "input") {
+ api.addToInput(inputKey, llmResult);
+ }
+ else if (storeLocation === "stream" && streamStoreCopyInInput) {
+ api.addToInput(inputKey, llmResult);
+ }
+ }
+ catch (error) {
+ const errorDetailsBase = {
+ name: error === null || error === void 0 ? void 0 : error.name,
+ code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
+ message: (error === null || error === void 0 ? void 0 : error.message) || ((_r = error.originalErrorDetails) === null || _r === void 0 ? void 0 : _r.message),
+ };
+ const errorDetails = Object.assign(Object.assign({}, errorDetailsBase), { originalErrorDetails: error === null || error === void 0 ? void 0 : error.originalErrorDetails });
+ // return the requestId if it exist in the error obj.
+ if ((_s = error.meta) === null || _s === void 0 ? void 0 : _s.requestId) {
+ errorDetails["meta"] = {
+ requestId: (_t = error.meta) === null || _t === void 0 ? void 0 : _t.requestId
+ };
+ }
+ if (logErrorToSystem) {
+ (_u = api.log) === null || _u === void 0 ? void 0 : _u.call(api, "error", JSON.stringify(errorDetailsBase));
+ }
+ (_v = api.logDebugError) === null || _v === void 0 ? void 0 : _v.call(api, errorDetailsBase, "UI__DEBUG_MODE__LLM_PROMPT__ERROR");
+ await handleServiceError(errorDetails);
+ return;
+ }
+ }
+ });
+ //# sourceMappingURL=LLMPromptV2.js.map