@cognigy/rest-api-client 2025.12.0 → 2025.14.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. package/CHANGELOG.md +10 -0
  2. package/build/apigroups/ResourcesAPIGroup_2_0.js +8 -1
  3. package/build/apigroups/SimulationAPIGroup_2_0.js +4 -1
  4. package/build/shared/charts/descriptors/analytics/trackGoal.js +3 -1
  5. package/build/shared/charts/descriptors/connectionNodes/internalStorageProviders/amazonStorageProviderConnection.js +7 -1
  6. package/build/shared/charts/descriptors/index.js +5 -0
  7. package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +10 -6
  8. package/build/shared/charts/descriptors/message/question/question.js +12 -1
  9. package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +11 -3
  10. package/build/shared/charts/descriptors/service/GPTPrompt.js +21 -1
  11. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +71 -175
  12. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobTool.js +2 -2
  13. package/build/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +175 -0
  14. package/build/shared/charts/descriptors/service/aiAgent/loadAiAgent.js +194 -0
  15. package/build/shared/charts/descriptors/service/handoverV2.js +1 -1
  16. package/build/shared/charts/descriptors/service/index.js +11 -1
  17. package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +959 -0
  18. package/build/shared/charts/descriptors/service/llmPrompt/llmPromptDefault.js +31 -0
  19. package/build/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +196 -0
  20. package/build/shared/charts/descriptors/service/llmPrompt/llmPromptTool.js +139 -0
  21. package/build/shared/constants.js +1 -5
  22. package/build/shared/interfaces/debugEvents/IGoalCompletedEventPayload.js +3 -0
  23. package/build/shared/interfaces/debugEvents/TDebugEventType.js +1 -0
  24. package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +12 -1
  25. package/build/shared/interfaces/messageAPI/handover.js +6 -0
  26. package/build/shared/interfaces/resources/ISimulation.js +9 -0
  27. package/build/shared/interfaces/resources/TResourceType.js +3 -0
  28. package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeChunk.js +2 -1
  29. package/build/shared/interfaces/resources/settings/IGenerativeAISettings.js +5 -18
  30. package/build/shared/interfaces/restAPI/operations/generateOutput/v2.0/index.js +3 -0
  31. package/build/shared/interfaces/restAPI/resources/chart/v2.0/IReadFlowChartAiAgentsRest_2_0.js +3 -0
  32. package/build/shared/interfaces/restAPI/simulation/persona/IGeneratePersonaRest_2_0.js +3 -0
  33. package/build/shared/interfaces/restAPI/simulation/persona/IGetPersonaOptionsRest_2_0.js +3 -0
  34. package/build/shared/interfaces/restAPI/simulation/persona/IRegeneratePersonaFieldRest_2_0.js +3 -0
  35. package/build/shared/interfaces/security/IPermission.js +4 -0
  36. package/build/shared/interfaces/security/IRole.js +5 -1
  37. package/build/shared/interfaces/security/index.js +1 -1
  38. package/dist/esm/apigroups/ResourcesAPIGroup_2_0.js +8 -1
  39. package/dist/esm/apigroups/SimulationAPIGroup_2_0.js +4 -1
  40. package/dist/esm/shared/charts/descriptors/analytics/trackGoal.js +3 -1
  41. package/dist/esm/shared/charts/descriptors/connectionNodes/internalStorageProviders/amazonStorageProviderConnection.js +7 -1
  42. package/dist/esm/shared/charts/descriptors/index.js +6 -1
  43. package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +10 -6
  44. package/dist/esm/shared/charts/descriptors/message/question/question.js +12 -1
  45. package/dist/esm/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +11 -3
  46. package/dist/esm/shared/charts/descriptors/service/GPTPrompt.js +21 -1
  47. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +72 -176
  48. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobTool.js +2 -2
  49. package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +172 -0
  50. package/dist/esm/shared/charts/descriptors/service/aiAgent/loadAiAgent.js +192 -0
  51. package/dist/esm/shared/charts/descriptors/service/handoverV2.js +1 -1
  52. package/dist/esm/shared/charts/descriptors/service/index.js +5 -0
  53. package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +946 -0
  54. package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptDefault.js +28 -0
  55. package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +193 -0
  56. package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptTool.js +136 -0
  57. package/dist/esm/shared/constants.js +1 -5
  58. package/dist/esm/shared/interfaces/debugEvents/IGoalCompletedEventPayload.js +2 -0
  59. package/dist/esm/shared/interfaces/debugEvents/TDebugEventType.js +1 -0
  60. package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +12 -1
  61. package/dist/esm/shared/interfaces/messageAPI/handover.js +6 -0
  62. package/dist/esm/shared/interfaces/resources/ISimulation.js +6 -0
  63. package/dist/esm/shared/interfaces/resources/TResourceType.js +3 -0
  64. package/dist/esm/shared/interfaces/resources/knowledgeStore/IKnowledgeChunk.js +2 -1
  65. package/dist/esm/shared/interfaces/resources/settings/IGenerativeAISettings.js +4 -17
  66. package/dist/esm/shared/interfaces/restAPI/operations/generateOutput/v2.0/index.js +2 -0
  67. package/dist/esm/shared/interfaces/restAPI/resources/chart/v2.0/IReadFlowChartAiAgentsRest_2_0.js +2 -0
  68. package/dist/esm/shared/interfaces/restAPI/simulation/persona/IGeneratePersonaRest_2_0.js +2 -0
  69. package/dist/esm/shared/interfaces/restAPI/simulation/persona/IGetPersonaOptionsRest_2_0.js +2 -0
  70. package/dist/esm/shared/interfaces/restAPI/simulation/persona/IRegeneratePersonaFieldRest_2_0.js +2 -0
  71. package/dist/esm/shared/interfaces/security/IPermission.js +4 -0
  72. package/dist/esm/shared/interfaces/security/IRole.js +5 -1
  73. package/dist/esm/shared/interfaces/security/index.js +1 -1
  74. package/package.json +1 -1
  75. package/types/index.d.ts +2093 -1927
package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js
@@ -0,0 +1,946 @@
+ import { __awaiter, __rest } from "tslib";
+ /* Custom modules */
+ import { createNodeDescriptor } from "../../../createNodeDescriptor";
+ import { GO_TO } from "../../logic";
+ import { randomUUID } from 'crypto';
+ import { createToolDefinitions } from "../aiAgent/helpers/createToolDefinitions";
+ import { createLastConverationString, createLastUserInputString, writeLLMDebugLogs } from "../../nlu/generativeSlotFiller/prompt";
+ import { InternalServerError } from "../../../../errors";
+ import { TranscriptEntryType, TranscriptRole } from "../../../../interfaces/transcripts/transcripts";
+ export const LLM_PROMPT_V2 = createNodeDescriptor({
+ type: "llmPromptV2",
+ defaultLabel: "LLM Prompt",
+ summary: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__DESCRIPTION",
+ constraints: {
+ collapsable: true,
+ placement: {
+ children: {
+ whitelist: ["llmPromptDefault", "llmPromptTool", "llmPromptMCPTool"],
+ },
+ },
+ },
+ behavior: {
+ entrypoint: true
+ },
+ dependencies: {
+ children: ["llmPromptDefault", "llmPromptTool"]
+ },
+ preview: {
+ type: "text",
+ key: "prompt",
+ },
+ fields: [
+ {
+ key: "llmProviderReferenceId",
+ type: "llmSelect",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__LLM_SELECT__LABEL",
+ defaultValue: "default",
+ params: {
+ required: true
+ }
+ },
+ {
+ key: "prompt",
+ label: "UI__NODE_EDITOR__SERVICE__LLM_PROMPT__FIELDS__PROMPT__LABEL",
+ type: "cognigyLLMText",
+ description: "UI__NODE_EDITOR__SERVICE__LLM_PROMPT__FIELDS__PROMPT__DESCRIPTION",
+ params: {
+ multiline: true,
+ rows: 5,
+ required: false
+ },
+ defaultValue: ""
+ },
+ {
+ key: "chatTranscriptSteps",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TRANSCRIPT_STEPS__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TRANSCRIPT_STEPS__DESCRIPTION",
+ defaultValue: 50,
+ params: {
+ min: 0,
+ max: 50,
+ step: 1
+ },
+ condition: {
+ key: "usePromptMode",
+ value: false,
+ }
+ },
+ {
+ key: "usePromptMode",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__USE_PROMPT__LABEL",
+ type: "toggle",
+ params: {
+ required: true
+ },
+ defaultValue: false
+ },
+ {
+ key: "samplingMethod",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__SAMPLING_METHOD__LABEL",
+ type: "select",
+ defaultValue: "temperature",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__SAMPLING_METHOD__OPTIONS__TEMPERATURE__LABEL",
+ value: "temperature"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__SAMPLING_METHOD__OPTIONS__TOP_PERCENTATGE__LABEL",
+ value: "topP"
+ }
+ ]
+ }
+ },
+ {
+ key: "temperature",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TEMPERATURE__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TEMPERATURE__DESCRIPTION",
+ defaultValue: 0.7,
+ params: {
+ min: 0,
+ max: 1,
+ step: 0.1
+ },
+ condition: {
+ key: "samplingMethod",
+ value: "temperature",
+ }
+ },
+ {
+ key: "topP",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TOP_P__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TOP_P__DESCRIPTION",
+ defaultValue: 1,
+ params: {
+ min: 0,
+ max: 1,
+ step: 0.1
+ },
+ condition: {
+ key: "samplingMethod",
+ value: "topP",
+ }
+ },
+ {
+ key: "maxTokens",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__MAX_TOKENS__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__MAX_TOKENS__DESCRIPTION",
+ defaultValue: 1000,
+ params: {
+ min: 100,
+ max: 16000,
+ step: 100
+ }
+ },
+ {
+ key: "frequencyPenalty",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__FREQUENCY_PENALTY__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__FREQUENCY_PENALTY__DESCRIPTION",
+ defaultValue: 0,
+ params: {
+ min: -2,
+ max: 2,
+ step: 0.1
+ }
+ },
+ {
+ key: "presencePenalty",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__PRESENCE_PENALTY__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__PRESENCE_PENALTY__DESCRIPTION",
+ defaultValue: 0,
+ params: {
+ min: -2,
+ max: 2,
+ step: 0.1
+ }
+ },
+ {
+ key: "useStop",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__USE_STOP__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__USE_STOP__DESCRIPTION",
+ defaultValue: false
+ },
+ {
+ key: "stop",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STOP__LABEL",
+ type: "textArray",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STOP__DESCRIPTION",
+ condition: {
+ key: "useStop",
+ value: true
+ }
+ },
+ {
+ key: "timeout",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TIMEOUT__LABEL",
+ defaultValue: 8000,
+ type: "number",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TIMEOUT__DESCRIPTION",
+ },
+ {
+ key: "storeLocation",
+ type: "select",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__DESCRIPTION",
+ defaultValue: "stream",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__OPTIONS__INPUT__LABEL",
+ value: "input"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__OPTIONS__CONTEXT__LABEL",
+ value: "context"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__OPTIONS__STREAM__LABEL",
+ value: "stream"
+ }
+ ],
+ required: true
+ },
+ },
+ {
+ key: "immediateOutput",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__IMMEDIATEOUTPUT__LABEL",
+ type: "toggle",
+ defaultValue: true,
+ condition: {
+ or: [
+ {
+ key: "storeLocation",
+ value: "input",
+ },
+ {
+ key: "storeLocation",
+ value: "context",
+ }
+ ]
+ }
+ },
+ {
+ key: "inputKey",
+ type: "cognigyText",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__INPUT_KEY__LABEL",
+ defaultValue: "promptResult",
+ condition: {
+ or: [
+ {
+ key: "storeLocation",
+ value: "input",
+ },
+ {
+ and: [
+ {
+ key: "storeLocation",
+ value: "stream",
+ },
+ {
+ key: "streamStoreCopyInInput",
+ value: true,
+ }
+ ]
+ }
+ ]
+ }
+ },
+ {
+ key: "contextKey",
+ type: "cognigyText",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CONTEXT_KEY__LABEL",
+ defaultValue: "promptResult",
+ condition: {
+ key: "storeLocation",
+ value: "context",
+ }
+ },
+ {
+ key: "streamStopTokens",
+ type: "textArray",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STOP_TOKENS__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STOP_TOKENS__DESCRIPTION",
+ defaultValue: [".", "!", "?", "\\n"],
+ condition: {
+ key: "storeLocation",
+ value: "stream",
+ }
+ },
+ {
+ key: "streamStopTokenOverrides",
+ type: "textArray",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STOP_TOKEN_OVERRIDES__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STOP_TOKEN_OVERRIDES__DESCRIPTION",
+ defaultValue: ["\d+\."],
+ condition: {
+ key: "storeLocation",
+ value: "stream",
+ }
+ },
+ {
+ key: "streamDescription",
+ type: "description",
+ label: " ",
+ params: {
+ text: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_DESCRIPTION__TEXT"
+ },
+ condition: {
+ key: "storeLocation",
+ value: "stream",
+ }
+ },
+ {
+ key: "streamStoreCopyInInput",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STORE_COPY__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STORE_COPY__DESCRIPTION",
+ defaultValue: false,
+ condition: {
+ key: "storeLocation",
+ value: "stream",
+ }
+ },
+ {
+ key: "debugLogTokenCount",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUGLOGTOKENCOUNT__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUGLOGTOKENCOUNT__DESCRIPTION",
+ defaultValue: false
+ },
+ {
+ key: "debugLogRequestAndCompletion",
+ label: "UI__NODE_EDITOR__SERVICE__LLM_PROMPT__FIELDS__LOG_SYSTEM_PROMPT_AND_COMPLETION__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__LLM_PROMPT__FIELDS__LOG_SYSTEM_PROMPT_AND_COMPLETION__DESCRIPTION",
+ defaultValue: false
+ },
+ {
+ key: "debugLogLLMLatency",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_LLM_LATENCY__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_LLM_LATENCY__DESCRIPTION",
+ defaultValue: false
+ },
+ {
+ key: "debugDescription",
+ type: "description",
+ label: " ",
+ params: {
+ text: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUG_DESCRIPTION__TEXT"
+ }
+ },
+ {
+ key: "responseFormat",
+ type: "select",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__RESPONSE_FORMAT__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__RESPONSE_FORMAT__DESCRIPTION",
+ defaultValue: "default",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__RESPONSE_FORMAT__OPTIONS__DEFAULT__LABEL",
+ value: "default"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__RESPONSE_FORMAT__OPTIONS__TEXT__LABEL",
+ value: "text"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__RESPONSE_FORMAT__OPTIONS__JSON__LABEL",
+ value: "json_object"
+ }
+ ]
+ },
+ },
+ {
+ key: "seed",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__SEED__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__SEED__DESCRIPTION",
+ type: "cognigyText",
+ defaultValue: ""
+ },
+ {
+ key: "jsonStreamWarning",
+ type: "description",
+ label: " ",
+ params: {
+ text: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__JSONSTREAMWARNING__PARAM"
+ },
+ condition: {
+ and: [
+ {
+ key: "responseFormat",
+ value: "json_object"
+ },
+ {
+ key: "storeLocation",
+ value: "stream",
+ }
+ ]
+ }
+ },
+ {
+ key: "customModelOptions",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_MODEL_OPTIONS__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_MODEL_OPTIONS__DESCRIPTION",
+ type: "json",
+ defaultValue: {}
+ },
+ {
+ key: "customRequestOptions",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_REQUEST_OPTIONS__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_REQUEST_OPTIONS__DESCRIPTION",
+ type: "json",
+ defaultValue: {}
+ },
+ {
+ key: "logErrorToSystem",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__LOG_ERROR_TO_SYSTEM__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__LOG_ERROR_TO_SYSTEM__DESCRIPTION",
+ type: "toggle",
+ defaultValue: false,
+ },
+ {
+ key: "errorHandling",
+ type: "select",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__HANDLE_SERVICE_ERROR__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__HANDLE_SERVICE_ERROR__DESCRIPTION",
+ defaultValue: "continue",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__HANDLE_SERVICE_ERROR__OPTIONS__STOP__LABEL",
+ value: "stop"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__HANDLE_SERVICE_ERROR__OPTIONS__CONTINUE__LABEL",
+ value: "continue"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__HANDLE_SERVICE_ERROR__OPTIONS__GOTO__LABEL",
+ value: "goto"
+ },
+ ]
+ }
+ },
+ {
+ key: "errorMessage",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__ERROR_MESSAGE__LABEL",
+ type: "cognigyText",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__ERROR_MESSAGE__DESCRIPTION",
+ defaultValue: "",
+ condition: {
+ key: "errorHandling",
+ value: "continue"
+ }
+ },
+ {
+ key: "errorHandlingGotoTarget",
+ type: "flowNode",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__ERROR__GOTO_NODE__LABEL",
+ condition: {
+ key: "errorHandling",
+ value: "goto"
+ }
+ },
+ {
+ key: "toolChoice",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TOOL_CHOICE__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TOOL_CHOICE__DESCRIPTION",
+ type: "select",
+ defaultValue: "auto",
+ params: {
+ options: [
+ {
+ label: 'UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TOOL_CHOICE__OPTIONS__AUTO__LABEL',
+ value: 'auto'
+ },
+ {
+ label: 'UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TOOL_CHOICE__OPTIONS__REQUIRED__LABEL',
+ value: 'required'
+ },
+ {
+ label: 'UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TOOL_CHOICE__OPTIONS__NONE__LABEL',
+ value: 'none'
+ }
+ ]
+ }
+ },
+ {
+ key: "useStrict",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__USE_STRICT__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__USE_STRICT__DESCRIPTION",
+ type: "toggle",
+ defaultValue: false
+ },
+ {
+ key: "processImages",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__PROCESS_IMAGES__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__PROCESS_IMAGES__DESCRIPTION",
+ type: "toggle",
+ defaultValue: false
+ },
+ {
+ key: "transcriptImageHandling",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TRANSCRIPT_IMAGES__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TRANSCRIPT_IMAGES__DESCRIPTION",
+ type: "select",
+ defaultValue: "minify",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TRANSCRIPT_IMAGES__OPTIONS__MINIFY__LABEL",
+ value: "minify"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TRANSCRIPT_IMAGES__OPTIONS__DROP__LABEL",
+ value: "drop"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TRANSCRIPT_IMAGES__OPTIONS__KEEP__LABEL",
+ value: "keep"
+ }
+ ],
+ },
+ },
+ {
+ key: "debugLogToolDefinitions",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_TOOL_DEFINITIONS__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_TOOL_DEFINITIONS__DESCRIPTION",
+ defaultValue: false
+ },
+ ],
+ sections: [
+ {
+ key: "advanced",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__SECTIONS__ADVANCED__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "maxTokens",
+ "usePromptMode",
+ "chatTranscriptSteps",
+ "responseFormat",
+ "jsonStreamWarning",
+ "timeout",
+ "samplingMethod",
+ "temperature",
+ "topP",
+ "presencePenalty",
+ "frequencyPenalty",
+ "useStop",
+ "stop",
+ "seed"
+ ]
+ },
+ {
+ key: "storage",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__SECTIONS__STORAGE__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "storeLocation",
+ "jsonStreamWarning",
+ "streamDescription",
+ "inputKey",
+ "contextKey",
+ "immediateOutput",
+ "streamStopTokens",
+ "streamStopTokenOverrides",
+ "streamStoreCopyInInput"
+ ]
+ },
+ {
+ key: "errorHandling",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__SECTIONS__ERROR_HANDLING__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "logErrorToSystem",
+ "errorHandling",
+ "errorMessage",
+ "errorHandlingGotoTarget",
+ ]
+ },
+ {
+ key: "customOptions",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__SECTIONS__CUSTOM_OPTIONS__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "customModelOptions",
+ "customRequestOptions"
+ ]
+ },
+ {
+ key: "debugging",
+ label: "UI__NODE_EDITOR__SECTIONS__DEBUG_SETTINGS__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "debugDescription",
+ "debugLogTokenCount",
+ "debugLogRequestAndCompletion",
+ "debugLogLLMLatency",
+ "debugLogToolDefinitions"
+ ]
+ },
+ {
+ key: "toolSettings",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__TOOL_SETTINGS__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "toolChoice",
+ "useStrict",
+ ],
+ },
+ {
+ key: "imageHandling",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__IMAGE_HANDLING__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "processImages",
+ "transcriptImageHandling"
+ ]
+ },
+ ],
+ form: [
+ { type: "field", key: "llmProviderReferenceId" },
+ { type: "field", key: "prompt" },
+ { type: "section", key: "advanced" },
+ { type: "section", key: "storage" },
+ { type: "section", key: "toolSettings" },
+ { type: "section", key: "imageHandling" },
+ { type: "section", key: "errorHandling" },
+ { type: "section", key: "customOptions" },
+ { type: "section", key: "debugging" },
+ ],
+ appearance: {
+ color: "#252525",
+ },
+ tags: ["ai", "llm", "gpt", "generative ai", "openai", "azure", "prompt"],
+ function: ({ cognigy, config, childConfigs, nodeId }) => __awaiter(void 0, void 0, void 0, function* () {
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v;
+ const { api, input, flowReferenceId } = cognigy;
+ const { temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, streamStopTokens, streamStopTokenOverrides, debugLogTokenCount, debugLogRequestAndCompletion, debugLogLLMLatency, debugLogToolDefinitions, llmProviderReferenceId, usePromptMode, chatTranscriptSteps, responseFormat, streamStoreCopyInInput, seed, immediateOutput, customModelOptions, customRequestOptions, errorHandling = "continue", // default behavior for LLM Prompt node was, continue its execution even though an error occurred (deviating it from the SEO node) & do not output an error message on UI explicitly. However, error is always stored in the input or context object. We can use an extra "say" node to output it.
+ errorHandlingGotoTarget, errorMessage, logErrorToSystem, processImages, transcriptImageHandling, toolChoice, useStrict } = config;
+ let prompt = config.prompt || "";
+ const { traceId } = input;
+ // check if custom variables are used and if they have a length modifier
+ // works only for a single variable per prompt
+ if (prompt.includes("@cognigyRecentConversation")) {
+ let turnLimit;
+ if (prompt.match(/@cognigyRecentConversation:(\d+)/)) {
+ // @cognigyRecentConversation has a length modifier (e.g. @cognigyRecentConversation:5), so we just want to return the top 5 turns
+ turnLimit = Number((_a = prompt.match(/@cognigyRecentConversation:(\d+)/)) === null || _a === void 0 ? void 0 : _a[1]);
+ }
+ const recentConversation = createLastConverationString(cognigy.lastConversationEntries, turnLimit) + "\n";
+ prompt = prompt.replace(/@cognigyRecentConversation(:\d+)?/, recentConversation);
+ }
+ if (prompt.includes("@cognigyRecentUserInputs")) {
+ let turnLimit;
+ if (prompt.match(/@cognigyRecentUserInputs:(\d+)/)) {
+ // @cognigyRecentUserInputs has a length modifier (e.g. @cognigyRecentUserInputs:5), so we just want to return the top 5 entries
+ turnLimit = Number((_b = prompt.match(/@cognigyRecentUserInputs:(\d+)/)) === null || _b === void 0 ? void 0 : _b[1]);
+ }
+ const recentUserInputs = createLastUserInputString(cognigy.lastConversationEntries, turnLimit) + "\n";
+ prompt = prompt.replace(/@cognigyRecentUserInputs(:\d+)?/, recentUserInputs);
+ }
+ // handle errors from external services, depending on the settings
+ const handleServiceError = (error) => __awaiter(void 0, void 0, void 0, function* () {
+ var _w, _x, _y, _z, _0, _1;
+ const compactError = {
+ name: error === null || error === void 0 ? void 0 : error.name,
+ code: error === null || error === void 0 ? void 0 : error.code,
+ message: (error === null || error === void 0 ? void 0 : error.message) || error
+ };
+ // return the requestId if it exist in the error obj.
+ if ((_w = error === null || error === void 0 ? void 0 : error.meta) === null || _w === void 0 ? void 0 : _w.requestId) {
+ compactError["requestId"] = (_x = error === null || error === void 0 ? void 0 : error.meta) === null || _x === void 0 ? void 0 : _x.requestId;
+ }
+ if ((_y = error === null || error === void 0 ? void 0 : error.originalErrorDetails) === null || _y === void 0 ? void 0 : _y.code) {
+ compactError.code = error.originalErrorDetails.code;
+ }
+ const errorResponse = {
+ error: compactError,
+ };
+ // add error to context or input
+ switch (storeLocation) {
+ case "context":
+ (_z = api.addToContext) === null || _z === void 0 ? void 0 : _z.call(api, contextKey, errorResponse, "simple");
+ break;
+ default:
+ api.addToInput(inputKey, errorResponse);
+ }
+ if (errorHandling === "continue") {
+ // output the timeout message
+ if (errorMessage) {
+ yield ((_0 = api.output) === null || _0 === void 0 ? void 0 : _0.call(api, errorMessage, null));
+ }
+ // Continue with default node as next node
+ const defaultChild = childConfigs === null || childConfigs === void 0 ? void 0 : childConfigs.find(child => child.type === "llmPromptDefault");
+ if (defaultChild) {
+ api.setNextNode(defaultChild.id);
+ }
+ }
+ else if (errorHandling === "goto") {
+ if (!errorHandlingGotoTarget) {
+ throw new Error("GoTo Target is required");
+ }
+ const gotoParams = {
+ cognigy,
+ childConfigs: [],
+ nodeId,
+ config: {
+ flowNode: {
+ flow: errorHandlingGotoTarget.flow,
+ node: errorHandlingGotoTarget.node,
+ },
+ injectedText: input.text,
+ injectedData: input.data,
+ executionMode: "continue",
+ absorbContext: false
+ }
+ };
+ yield ((_1 = GO_TO.function) === null || _1 === void 0 ? void 0 : _1.call(GO_TO, gotoParams));
+ }
+ else {
+ throw new InternalServerError(error === null || error === void 0 ? void 0 : error.message, { traceId });
+ }
+ });
+ try {
+ const isStreamingChannel = input.channel === "webchat3" || input.channel === "adminconsole";
+ const _messageId = randomUUID();
+ // Start measuring LLM latency and time to first output if debug flag is enabled
+ let firstOutputTime = null;
+ /**
+ * Retrieve the tool definitions from the child nodes
+ */
+ const { toolIds, toolNames, toolMap, tools } = yield createToolDefinitions(childConfigs, api, useStrict);
+ /**
+ * Generate Prompt Options
+ */
+ const llmPromptOptions = Object.assign(Object.assign(Object.assign({ prompt,
+ temperature,
+ maxTokens,
+ topP,
+ presencePenalty,
+ frequencyPenalty, timeoutInMs: timeout, useCase: "promptNode", stream: storeLocation === "stream", streamOnDataHandler: (text) => {
+ var _a;
+ text = isStreamingChannel ? text : text.trim();
+ if (text) {
+ // Record first output time for debugging if not already recorded
+ if (debugLogLLMLatency && firstOutputTime === null) {
+ firstOutputTime = Date.now();
+ }
+ // if we got text, we output it, but prevent it from being added to the transcript
+ (_a = api.output) === null || _a === void 0 ? void 0 : _a.call(api, text, {
+ _cognigy: {
+ _preventTranscript: true,
+ _messageId,
+ }
+ });
+ }
+ }, streamStopTokens: streamStopTokens || [".", "!", "?", "\\n"], streamStopTokenOverrides, preventNewLineRemoval: isStreamingChannel ? true : false,
+ // set to true in order to get token usage
+ detailedResults: true, seed: Number(seed) ? Number(seed) : undefined }, (tools.length > 0 && { tools })), (tools.length > 0 && { toolChoice: toolChoice })), { customModelOptions,
+ customRequestOptions });
+ if (useStop) {
+ llmPromptOptions["stop"] = stop;
+ }
+ // llmProviderReferenceId `default` value is not a responseFormat, rather it is LLM Model default selection.
+ if (llmProviderReferenceId && llmProviderReferenceId !== "default") {
+ llmPromptOptions["llmProviderReferenceId"] = llmProviderReferenceId;
+ }
+ if (processImages) {
+ llmPromptOptions["imageOptions"] = {
+ processImages,
+ transcriptImageHandling
+ };
+ }
+ if (responseFormat && responseFormat !== "default") {
+ llmPromptOptions["responseFormat"] = responseFormat;
+ }
+ let debugPrompt = prompt;
+ // if we're not using prompt mode, we need to add the system message and the transcript
+ // this is the equivalent of the old "useChat" mode
+ if (!usePromptMode) {
+ const transcript = yield api.getTranscript({
+ limit: chatTranscriptSteps || 50,
+ rolesWhiteList: [TranscriptRole.USER, TranscriptRole.ASSISTANT, TranscriptRole.TOOL],
+ excludeDataOnlyMessagesFilter: [TranscriptRole.ASSISTANT]
+ });
+ llmPromptOptions["transcript"] = transcript;
+ llmPromptOptions["chat"] = [{
+ role: "system",
+ content: prompt
+ }];
+ }
+ const llmStartTime = debugLogLLMLatency ? Date.now() : 0;
+ // Run the LLM Query
+ const fullLlmResult = yield api.runGenerativeAIPrompt(llmPromptOptions, "gptPromptNode");
+ // End measuring times and log if debug flag is enabled
+ if (debugLogLLMLatency) {
+ const llmEndTime = Date.now();
+ const debugMessages = [];
+ const llmLatencyMs = llmEndTime - llmStartTime;
+ let timeToFirstOutputLabel;
+ if (fullLlmResult.finishReason === "tool_calls" && fullLlmResult.toolCalls.length > 0) {
+ timeToFirstOutputLabel = " - (tool call)";
+ }
+ else if (firstOutputTime === null) {
+ timeToFirstOutputLabel = " - (no output)";
+ }
+ else {
+ firstOutputTime = firstOutputTime || llmEndTime;
+ timeToFirstOutputLabel = `${firstOutputTime - llmStartTime}ms`;
+ }
+ if (storeLocation === "stream") {
+ debugMessages.push(`UI__DEBUG_MODE__AI_AGENT_JOB__TIME_TO_FIRST_OUTPUT__LABEL: ${timeToFirstOutputLabel}`);
+ }
+ debugMessages.push(`UI__DEBUG_MODE__AI_AGENT_JOB__LLM_LATENCY__LABEL: ${llmLatencyMs}ms`);
+ (_c = api.logDebugMessage) === null || _c === void 0 ? void 0 : _c.call(api, debugMessages.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TIMING__HEADER");
+ }
+ const { messages } = fullLlmResult, llmResult = __rest(fullLlmResult, ["messages"]);
+ const isFollowSessionActive = api.getMetadata().isFollowSessionActive;
+ if (debugLogToolDefinitions) {
+ (_d = api.logDebugMessage) === null || _d === void 0 ? void 0 : _d.call(api, tools, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_DEFINITIONS");
+ }
+ // if we're in adminconsole or following a session, process debugging options
+ (input.endpointType === "adminconsole" || isFollowSessionActive) && writeLLMDebugLogs("LLM Prompt", debugPrompt, llmResult, debugLogTokenCount, debugLogRequestAndCompletion, cognigy, "llmPromptV2");
+ if (llmResult.finishReason === "tool_calls" && llmResult.toolCalls.length > 0) {
+ const mainToolCall = llmResult.toolCalls[0];
+ let isMcpToolCall = false;
+ // Find the child node with the toolId of the tool call
+ let toolChild = childConfigs.find(child => { var _a, _b; return child.type === "llmPromptTool" && ((_a = child.config) === null || _a === void 0 ? void 0 : _a.toolId) && api.parseCognigyScriptText((_b = child.config) === null || _b === void 0 ? void 0 : _b.toolId) === mainToolCall.function.name; });
+ if (!toolChild && toolMap.has(mainToolCall.function.name)) {
+ // If the tool call is from an MCP tool, set the next node to the corresponding child node
+ toolChild = childConfigs.find(child => child.id === toolMap.get(mainToolCall.function.name));
+ isMcpToolCall = true;
+ }
+ if (mainToolCall.function.name !== "retrieve_knowledge" && toolChild === undefined) {
+ (_e = api.logDebugError) === null || _e === void 0 ? void 0 : _e.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
+ }
+ // Add last tool call to session state for loading it from Tool Answer Node
+ api.updateSessionStateValues({
+ lastToolCall: Object.assign(Object.assign({ llmProvider: fullLlmResult.llmProvider, aiAgentJobNode: {
+ flow: flowReferenceId,
+ node: nodeId,
+ } }, (isMcpToolCall && {
+ mcpServerUrl: (_f = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _f === void 0 ? void 0 : _f.mcpServerUrl,
+ timeout: (_g = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _g === void 0 ? void 0 : _g.timeout,
+ mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
+ })), { toolCall: mainToolCall }),
+ });
+ // if there are any parameters/arguments, add them to the input slots
+ if (mainToolCall.function.arguments) {
+ input.llmPrompt = Object.assign(Object.assign({}, input.llmPrompt), { toolArgs: Object.assign(Object.assign({}, (_j = (_h = input.llmPrompt) === null || _h === void 0 ? void 0 : _h.toolArgs) !== null && _j !== void 0 ? _j : {}), mainToolCall.function.arguments) });
+ }
+ // Debug Message for Tool Calls, configured in the Tool Node
+ if ((_k = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _k === void 0 ? void 0 : _k.debugMessage) {
+ const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${api.parseCognigyScriptText(toolChild.config.toolId)}`];
+ // Arguments / Parameters Slots
+ const slots = ((_l = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _l === void 0 ? void 0 : _l.arguments) && Object.keys(mainToolCall.function.arguments);
+ const hasSlots = slots && slots.length > 0;
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
+ if (hasSlots) {
+ slots.forEach(slot => {
+ let slotValueAsString = mainToolCall.function.arguments[slot];
+ if (typeof slotValueAsString === "object" && slotValueAsString !== null) {
+ slotValueAsString = JSON.stringify(slotValueAsString, null, 2);
+ }
+ else {
+ String(slotValueAsString);
+ }
+ messageLines.push(`- ${slot}: ${slotValueAsString}`);
+ });
+ }
+ (_m = api.logDebugMessage) === null || _m === void 0 ? void 0 : _m.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
+ }
+ if (toolChild) {
+ api.setNextNode(toolChild.id);
+ }
+ else {
+ const defaultChild = childConfigs === null || childConfigs === void 0 ? void 0 : childConfigs.find(child => child.type === "llmPromptDefault");
+ if (defaultChild) {
+ api.setNextNode(defaultChild.id);
+ }
+ }
+ }
+ else {
+ // Default case
+ const defaultChild = childConfigs === null || childConfigs === void 0 ? void 0 : childConfigs.find(child => child.type === "llmPromptDefault");
+ if (defaultChild) {
+ api.setNextNode(defaultChild.id);
+ }
+ }
+ // Optionally output the result immediately
+ // This will also store it into the output
+ if (llmResult.result && immediateOutput && !llmPromptOptions.stream) {
+ // we stringify objects (e.g. results coming from JSON Mode)
+ // so that the transcript only contains text
+ const resultToOutput = typeof ((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult) === "object" ? JSON.stringify((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult, undefined, 2) : (llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult;
+ yield ((_o = api.output) === null || _o === void 0 ? void 0 : _o.call(api, resultToOutput, {}));
+ }
+ else if (llmResult.finishReason && llmPromptOptions.stream) {
+ // send the finishReason as last output for a stream
+ (_p = api.output) === null || _p === void 0 ? void 0 : _p.call(api, "", {
+ _cognigy: {
+ _preventTranscript: true,
+ _messageId,
+ _finishReason: llmResult.finishReason,
+ }
+ });
+ }
+ // If we are streaming and we got a result, also store it into the transcript, since streamed chunks are not stored there
+ if (llmResult.result && llmPromptOptions.stream) {
+ const transcriptContent = {
+ role: TranscriptRole.ASSISTANT,
+ type: TranscriptEntryType.OUTPUT,
+ source: "assistant",
+ payload: {
+ text: ((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult),
+ data: {},
+ }
+ };
+ yield api.addTranscriptStep(transcriptContent);
+ }
+ // Add response to Cognigy Input/Context for further usage
+ if (storeLocation === "context") {
+ (_q = api.addToContext) === null || _q === void 0 ? void 0 : _q.call(api, contextKey, llmResult, "simple");
+ }
+ else if (storeLocation === "input") {
+ api.addToInput(inputKey, llmResult);
+ }
+ else if (storeLocation === "stream" && streamStoreCopyInInput) {
+ api.addToInput(inputKey, llmResult);
+ }
+ }
+ catch (error) {
+ const errorDetailsBase = {
+ name: error === null || error === void 0 ? void 0 : error.name,
+ code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
+ message: (error === null || error === void 0 ? void 0 : error.message) || ((_r = error.originalErrorDetails) === null || _r === void 0 ? void 0 : _r.message),
+ };
+ const errorDetails = Object.assign(Object.assign({}, errorDetailsBase), { originalErrorDetails: error === null || error === void 0 ? void 0 : error.originalErrorDetails });
+ // return the requestId if it exist in the error obj.
+ if ((_s = error.meta) === null || _s === void 0 ? void 0 : _s.requestId) {
+ errorDetails["meta"] = {
+ requestId: (_t = error.meta) === null || _t === void 0 ? void 0 : _t.requestId
+ };
+ }
+ if (logErrorToSystem) {
+ (_u = api.log) === null || _u === void 0 ? void 0 : _u.call(api, "error", JSON.stringify(errorDetailsBase));
+ }
+ (_v = api.logDebugError) === null || _v === void 0 ? void 0 : _v.call(api, errorDetailsBase, "UI__DEBUG_MODE__LLM_PROMPT__ERROR");
+ yield handleServiceError(errorDetails);
+ return;
+ }
+ })
+ });
+ //# sourceMappingURL=LLMPromptV2.js.map
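
For readability, the following is a minimal untranspiled sketch of the prompt-variable substitution that the new LLMPromptV2 node performs before sending the prompt (the transpiled equivalent appears in the diff above). It assumes that createLastConverationString and createLastUserInputString (spelled as in the package) accept the conversation entries plus an optional turn limit, as their call sites in the diff suggest; this is an illustration, not part of the published code.

// TypeScript sketch of the "@cognigyRecentConversation[:n]" / "@cognigyRecentUserInputs[:n]"
// substitution shown in the diff; only the first occurrence of each variable is replaced,
// and the ":n" length modifier (e.g. "@cognigyRecentConversation:5") limits the number of turns.
import { createLastConverationString, createLastUserInputString } from "../../nlu/generativeSlotFiller/prompt";

export function resolvePromptVariables(prompt: string, lastConversationEntries: unknown[]): string {
    if (prompt.includes("@cognigyRecentConversation")) {
        const limitMatch = prompt.match(/@cognigyRecentConversation:(\d+)/);
        const turnLimit = limitMatch ? Number(limitMatch[1]) : undefined;
        const recentConversation = createLastConverationString(lastConversationEntries, turnLimit) + "\n";
        prompt = prompt.replace(/@cognigyRecentConversation(:\d+)?/, recentConversation);
    }
    if (prompt.includes("@cognigyRecentUserInputs")) {
        const limitMatch = prompt.match(/@cognigyRecentUserInputs:(\d+)/);
        const turnLimit = limitMatch ? Number(limitMatch[1]) : undefined;
        const recentUserInputs = createLastUserInputString(lastConversationEntries, turnLimit) + "\n";
        prompt = prompt.replace(/@cognigyRecentUserInputs(:\d+)?/, recentUserInputs);
    }
    return prompt;
}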