@cognigy/rest-api-client 2025.12.0 → 2025.13.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/CHANGELOG.md +5 -0
  2. package/build/apigroups/ResourcesAPIGroup_2_0.js +4 -0
  3. package/build/shared/charts/descriptors/analytics/trackGoal.js +3 -1
  4. package/build/shared/charts/descriptors/index.js +5 -0
  5. package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +4 -2
  6. package/build/shared/charts/descriptors/message/question/question.js +12 -1
  7. package/build/shared/charts/descriptors/service/GPTPrompt.js +15 -1
  8. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +32 -173
  9. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobTool.js +2 -2
  10. package/build/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +175 -0
  11. package/build/shared/charts/descriptors/service/aiAgent/loadAiAgent.js +194 -0
  12. package/build/shared/charts/descriptors/service/handoverV2.js +1 -1
  13. package/build/shared/charts/descriptors/service/index.js +11 -1
  14. package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +922 -0
  15. package/build/shared/charts/descriptors/service/llmPrompt/llmPromptDefault.js +31 -0
  16. package/build/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +196 -0
  17. package/build/shared/charts/descriptors/service/llmPrompt/llmPromptTool.js +139 -0
  18. package/build/shared/constants.js +1 -5
  19. package/build/shared/interfaces/debugEvents/IGoalCompletedEventPayload.js +3 -0
  20. package/build/shared/interfaces/debugEvents/TDebugEventType.js +1 -0
  21. package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +12 -1
  22. package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeChunk.js +2 -1
  23. package/build/shared/interfaces/resources/settings/IGenerativeAISettings.js +5 -18
  24. package/build/shared/interfaces/restAPI/operations/generateOutput/v2.0/index.js +3 -0
  25. package/build/shared/interfaces/security/IPermission.js +2 -0
  26. package/build/shared/interfaces/security/IRole.js +3 -1
  27. package/build/shared/interfaces/security/index.js +1 -1
  28. package/dist/esm/apigroups/ResourcesAPIGroup_2_0.js +4 -0
  29. package/dist/esm/shared/charts/descriptors/analytics/trackGoal.js +3 -1
  30. package/dist/esm/shared/charts/descriptors/index.js +6 -1
  31. package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +4 -2
  32. package/dist/esm/shared/charts/descriptors/message/question/question.js +12 -1
  33. package/dist/esm/shared/charts/descriptors/service/GPTPrompt.js +15 -1
  34. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +33 -174
  35. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobTool.js +2 -2
  36. package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +172 -0
  37. package/dist/esm/shared/charts/descriptors/service/aiAgent/loadAiAgent.js +192 -0
  38. package/dist/esm/shared/charts/descriptors/service/handoverV2.js +1 -1
  39. package/dist/esm/shared/charts/descriptors/service/index.js +5 -0
  40. package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +909 -0
  41. package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptDefault.js +28 -0
  42. package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +193 -0
  43. package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptTool.js +136 -0
  44. package/dist/esm/shared/constants.js +1 -5
  45. package/dist/esm/shared/interfaces/debugEvents/IGoalCompletedEventPayload.js +2 -0
  46. package/dist/esm/shared/interfaces/debugEvents/TDebugEventType.js +1 -0
  47. package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +12 -1
  48. package/dist/esm/shared/interfaces/resources/knowledgeStore/IKnowledgeChunk.js +2 -1
  49. package/dist/esm/shared/interfaces/resources/settings/IGenerativeAISettings.js +4 -17
  50. package/dist/esm/shared/interfaces/restAPI/operations/generateOutput/v2.0/index.js +2 -0
  51. package/dist/esm/shared/interfaces/security/IPermission.js +2 -0
  52. package/dist/esm/shared/interfaces/security/IRole.js +3 -1
  53. package/dist/esm/shared/interfaces/security/index.js +1 -1
  54. package/package.json +1 -1
  55. package/types/index.d.ts +83 -42
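The largest addition in this release is the new LLM Prompt v2 node descriptor (`LLMPromptV2.js`, exported as `LLM_PROMPT_V2`), reproduced in full below. As a quick orientation, the built descriptor could be loaded and inspected roughly as follows; this is a sketch only, the deep import path is inferred from the file list above and may not be a supported entry point of the package:

// Hypothetical usage sketch, not taken from the package documentation.
// The deep import path below mirrors the built file listed above and may be
// blocked by the package's "exports" map; the descriptor's exact shape depends
// on createNodeDescriptor().
const { LLM_PROMPT_V2 } = require("@cognigy/rest-api-client/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2");
console.log(Object.keys(LLM_PROMPT_V2)); // inspect the descriptor defined in the diff below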
package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js
@@ -0,0 +1,922 @@
+ "use strict";
+ var __rest = (this && this.__rest) || function (s, e) {
+ var t = {};
+ for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)
+ t[p] = s[p];
+ if (s != null && typeof Object.getOwnPropertySymbols === "function")
+ for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {
+ if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))
+ t[p[i]] = s[p[i]];
+ }
+ return t;
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.LLM_PROMPT_V2 = void 0;
+ /* Custom modules */
+ const createNodeDescriptor_1 = require("../../../createNodeDescriptor");
+ const logic_1 = require("../../logic");
+ const crypto_1 = require("crypto");
+ const createToolDefinitions_1 = require("../aiAgent/helpers/createToolDefinitions");
+ const prompt_1 = require("../../nlu/generativeSlotFiller/prompt");
+ const errors_1 = require("../../../../errors");
+ const transcripts_1 = require("../../../../interfaces/transcripts/transcripts");
+ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
+ type: "llmPromptV2",
+ defaultLabel: "LLM Prompt",
+ summary: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__DESCRIPTION",
+ constraints: {
+ collapsable: true,
+ placement: {
+ children: {
+ whitelist: ["llmPromptDefault", "llmPromptTool", "llmPromptMCPTool"],
+ },
+ },
+ },
+ behavior: {
+ entrypoint: true
+ },
+ dependencies: {
+ children: ["llmPromptDefault", "llmPromptTool"]
+ },
+ preview: {
+ type: "text",
+ key: "prompt",
+ },
+ fields: [
+ {
+ key: "llmProviderReferenceId",
+ type: "llmSelect",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__LLM_SELECT__LABEL",
+ defaultValue: "default",
+ params: {
+ required: true
+ }
+ },
+ {
+ key: "prompt",
+ label: "UI__NODE_EDITOR__SERVICE__LLM_PROMPT__FIELDS__PROMPT__LABEL",
+ type: "cognigyLLMText",
+ description: "UI__NODE_EDITOR__SERVICE__LLM_PROMPT__FIELDS__PROMPT__DESCRIPTION",
+ params: {
+ multiline: true,
+ rows: 5,
+ required: false
+ },
+ defaultValue: ""
+ },
+ {
+ key: "chatTranscriptSteps",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TRANSCRIPT_STEPS__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TRANSCRIPT_STEPS__DESCRIPTION",
+ defaultValue: 50,
+ params: {
+ min: 0,
+ max: 50,
+ step: 1
+ },
+ condition: {
+ key: "usePromptMode",
+ value: false,
+ }
+ },
+ {
+ key: "usePromptMode",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__USE_PROMPT__LABEL",
+ type: "toggle",
+ params: {
+ required: true
+ },
+ defaultValue: false
+ },
+ {
+ key: "samplingMethod",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__SAMPLING_METHOD__LABEL",
+ type: "select",
+ defaultValue: "temperature",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__SAMPLING_METHOD__OPTIONS__TEMPERATURE__LABEL",
+ value: "temperature"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__SAMPLING_METHOD__OPTIONS__TOP_PERCENTATGE__LABEL",
+ value: "topP"
+ }
+ ]
+ }
+ },
+ {
+ key: "temperature",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TEMPERATURE__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TEMPERATURE__DESCRIPTION",
+ defaultValue: 0.7,
+ params: {
+ min: 0,
+ max: 1,
+ step: 0.1
+ },
+ condition: {
+ key: "samplingMethod",
+ value: "temperature",
+ }
+ },
+ {
+ key: "topP",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TOP_P__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TOP_P__DESCRIPTION",
+ defaultValue: 1,
+ params: {
+ min: 0,
+ max: 1,
+ step: 0.1
+ },
+ condition: {
+ key: "samplingMethod",
+ value: "topP",
+ }
+ },
+ {
+ key: "maxTokens",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__MAX_TOKENS__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__MAX_TOKENS__DESCRIPTION",
+ defaultValue: 1000,
+ params: {
+ min: 100,
+ max: 16000,
+ step: 100
+ }
+ },
+ {
+ key: "frequencyPenalty",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__FREQUENCY_PENALTY__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__FREQUENCY_PENALTY__DESCRIPTION",
+ defaultValue: 0,
+ params: {
+ min: -2,
+ max: 2,
+ step: 0.1
+ }
+ },
+ {
+ key: "presencePenalty",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__PRESENCE_PENALTY__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__PRESENCE_PENALTY__DESCRIPTION",
+ defaultValue: 0,
+ params: {
+ min: -2,
+ max: 2,
+ step: 0.1
+ }
+ },
+ {
+ key: "useStop",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__USE_STOP__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__USE_STOP__DESCRIPTION",
+ defaultValue: false
+ },
+ {
+ key: "stop",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STOP__LABEL",
+ type: "textArray",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STOP__DESCRIPTION",
+ condition: {
+ key: "useStop",
+ value: true
+ }
+ },
+ {
+ key: "timeout",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TIMEOUT__LABEL",
+ defaultValue: 8000,
+ type: "number",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TIMEOUT__DESCRIPTION",
+ },
+ {
+ key: "storeLocation",
+ type: "select",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__DESCRIPTION",
+ defaultValue: "stream",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__OPTIONS__INPUT__LABEL",
+ value: "input"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__OPTIONS__CONTEXT__LABEL",
+ value: "context"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__OPTIONS__STREAM__LABEL",
+ value: "stream"
+ }
+ ],
+ required: true
+ },
+ },
+ {
+ key: "immediateOutput",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__IMMEDIATEOUTPUT__LABEL",
+ type: "toggle",
+ defaultValue: true,
+ condition: {
+ or: [
+ {
+ key: "storeLocation",
+ value: "input",
+ },
+ {
+ key: "storeLocation",
+ value: "context",
+ }
+ ]
+ }
+ },
+ {
+ key: "inputKey",
+ type: "cognigyText",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__INPUT_KEY__LABEL",
+ defaultValue: "promptResult",
+ condition: {
+ or: [
+ {
+ key: "storeLocation",
+ value: "input",
+ },
+ {
+ and: [
+ {
+ key: "storeLocation",
+ value: "stream",
+ },
+ {
+ key: "streamStoreCopyInInput",
+ value: true,
+ }
+ ]
+ }
+ ]
+ }
+ },
+ {
+ key: "contextKey",
+ type: "cognigyText",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CONTEXT_KEY__LABEL",
+ defaultValue: "promptResult",
+ condition: {
+ key: "storeLocation",
+ value: "context",
+ }
+ },
+ {
+ key: "streamStopTokens",
+ type: "textArray",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STOP_TOKENS__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STOP_TOKENS__DESCRIPTION",
+ defaultValue: [".", "!", "?", "\\n"],
+ condition: {
+ key: "storeLocation",
+ value: "stream",
+ }
+ },
+ {
+ key: "streamStopTokenOverrides",
+ type: "textArray",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STOP_TOKEN_OVERRIDES__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STOP_TOKEN_OVERRIDES__DESCRIPTION",
+ defaultValue: ["\d+\."],
+ condition: {
+ key: "storeLocation",
+ value: "stream",
+ }
+ },
+ {
+ key: "streamDescription",
+ type: "description",
+ label: " ",
+ params: {
+ text: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_DESCRIPTION__TEXT"
+ },
+ condition: {
+ key: "storeLocation",
+ value: "stream",
+ }
+ },
+ {
+ key: "streamStoreCopyInInput",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STORE_COPY__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STORE_COPY__DESCRIPTION",
+ defaultValue: false,
+ condition: {
+ key: "storeLocation",
+ value: "stream",
+ }
+ },
+ {
+ key: "debugLogTokenCount",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUGLOGTOKENCOUNT__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUGLOGTOKENCOUNT__DESCRIPTION",
+ defaultValue: false
+ },
+ {
+ key: "debugLogRequestAndCompletion",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUGLOGREQUESTANDCOMPLETION__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUGLOGREQUESTANDCOMPLETION__DESCRIPTION",
+ defaultValue: false
+ },
+ {
+ key: "debugDescription",
+ type: "description",
+ label: " ",
+ params: {
+ text: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUG_DESCRIPTION__TEXT"
+ }
+ },
+ {
+ key: "responseFormat",
+ type: "select",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__RESPONSE_FORMAT__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__RESPONSE_FORMAT__DESCRIPTION",
+ defaultValue: "default",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__RESPONSE_FORMAT__OPTIONS__DEFAULT__LABEL",
+ value: "default"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__RESPONSE_FORMAT__OPTIONS__TEXT__LABEL",
+ value: "text"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__RESPONSE_FORMAT__OPTIONS__JSON__LABEL",
+ value: "json_object"
+ }
+ ]
+ },
+ },
+ {
+ key: "seed",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__SEED__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__SEED__DESCRIPTION",
+ type: "cognigyText",
+ defaultValue: ""
+ },
+ {
+ key: "jsonStreamWarning",
+ type: "description",
+ label: " ",
+ params: {
+ text: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__JSONSTREAMWARNING__PARAM"
+ },
+ condition: {
+ and: [
+ {
+ key: "responseFormat",
+ value: "json_object"
+ },
+ {
+ key: "storeLocation",
+ value: "stream",
+ }
+ ]
+ }
+ },
+ {
+ key: "customModelOptions",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_MODEL_OPTIONS__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_MODEL_OPTIONS__DESCRIPTION",
+ type: "json",
+ defaultValue: {}
+ },
+ {
+ key: "customRequestOptions",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_REQUEST_OPTIONS__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_REQUEST_OPTIONS__DESCRIPTION",
+ type: "json",
+ defaultValue: {}
+ },
+ {
+ key: "logErrorToSystem",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__LOG_ERROR_TO_SYSTEM__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__LOG_ERROR_TO_SYSTEM__DESCRIPTION",
+ type: "toggle",
+ defaultValue: false,
+ },
+ {
+ key: "errorHandling",
+ type: "select",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__HANDLE_SERVICE_ERROR__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__HANDLE_SERVICE_ERROR__DESCRIPTION",
+ defaultValue: "continue",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__HANDLE_SERVICE_ERROR__OPTIONS__STOP__LABEL",
+ value: "stop"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__HANDLE_SERVICE_ERROR__OPTIONS__CONTINUE__LABEL",
+ value: "continue"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__HANDLE_SERVICE_ERROR__OPTIONS__GOTO__LABEL",
+ value: "goto"
+ },
+ ]
+ }
+ },
+ {
+ key: "errorMessage",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__ERROR_MESSAGE__LABEL",
+ type: "cognigyText",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__ERROR_MESSAGE__DESCRIPTION",
+ defaultValue: "",
+ condition: {
+ key: "errorHandling",
+ value: "continue"
+ }
+ },
+ {
+ key: "errorHandlingGotoTarget",
+ type: "flowNode",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__ERROR__GOTO_NODE__LABEL",
+ condition: {
+ key: "errorHandling",
+ value: "goto"
+ }
+ },
+ {
+ key: "toolChoice",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TOOL_CHOICE__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TOOL_CHOICE__DESCRIPTION",
+ type: "select",
+ defaultValue: "auto",
+ params: {
+ options: [
+ {
+ label: 'UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TOOL_CHOICE__OPTIONS__AUTO__LABEL',
+ value: 'auto'
+ },
+ {
+ label: 'UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TOOL_CHOICE__OPTIONS__REQUIRED__LABEL',
+ value: 'required'
+ },
+ {
+ label: 'UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TOOL_CHOICE__OPTIONS__NONE__LABEL',
+ value: 'none'
+ }
+ ]
+ }
+ },
+ {
+ key: "useStrict",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__USE_STRICT__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__USE_STRICT__DESCRIPTION",
+ type: "toggle",
+ defaultValue: false
+ },
+ {
+ key: "processImages",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__PROCESS_IMAGES__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__PROCESS_IMAGES__DESCRIPTION",
+ type: "toggle",
+ defaultValue: false
+ },
+ {
+ key: "transcriptImageHandling",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TRANSCRIPT_IMAGES__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TRANSCRIPT_IMAGES__DESCRIPTION",
+ type: "select",
+ defaultValue: "minify",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TRANSCRIPT_IMAGES__OPTIONS__MINIFY__LABEL",
+ value: "minify"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TRANSCRIPT_IMAGES__OPTIONS__DROP__LABEL",
+ value: "drop"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TRANSCRIPT_IMAGES__OPTIONS__KEEP__LABEL",
+ value: "keep"
+ }
+ ],
+ },
+ },
+ {
+ key: "debugLogToolDefinitions",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_TOOL_DEFINITIONS__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_TOOL_DEFINITIONS__DESCRIPTION",
+ defaultValue: false
+ },
+ ],
+ sections: [
+ {
+ key: "advanced",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__SECTIONS__ADVANCED__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "maxTokens",
+ "usePromptMode",
+ "chatTranscriptSteps",
+ "responseFormat",
+ "jsonStreamWarning",
+ "timeout",
+ "samplingMethod",
+ "temperature",
+ "topP",
+ "presencePenalty",
+ "frequencyPenalty",
+ "useStop",
+ "stop",
+ "seed"
+ ]
+ },
+ {
+ key: "storage",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__SECTIONS__STORAGE__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "storeLocation",
+ "jsonStreamWarning",
+ "streamDescription",
+ "inputKey",
+ "contextKey",
+ "immediateOutput",
+ "streamStopTokens",
+ "streamStopTokenOverrides",
+ "streamStoreCopyInInput"
+ ]
+ },
+ {
+ key: "errorHandling",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__SECTIONS__ERROR_HANDLING__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "logErrorToSystem",
+ "errorHandling",
+ "errorMessage",
+ "errorHandlingGotoTarget",
+ ]
+ },
+ {
+ key: "customOptions",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__SECTIONS__CUSTOM_OPTIONS__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "customModelOptions",
+ "customRequestOptions"
+ ]
+ },
+ {
+ key: "debugging",
+ label: "UI__NODE_EDITOR__SECTIONS__DEBUG_SETTINGS__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "debugDescription",
+ "debugLogTokenCount",
+ "debugLogRequestAndCompletion",
+ "debugLogToolDefinitions"
+ ]
+ },
+ {
+ key: "toolSettings",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__TOOL_SETTINGS__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "toolChoice",
+ "useStrict",
+ ],
+ },
+ {
+ key: "imageHandling",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__IMAGE_HANDLING__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "processImages",
+ "transcriptImageHandling"
+ ]
+ },
+ ],
+ form: [
+ { type: "field", key: "llmProviderReferenceId" },
+ { type: "field", key: "prompt" },
+ { type: "section", key: "advanced" },
+ { type: "section", key: "storage" },
+ { type: "section", key: "toolSettings" },
+ { type: "section", key: "imageHandling" },
+ { type: "section", key: "errorHandling" },
+ { type: "section", key: "customOptions" },
+ { type: "section", key: "debugging" },
+ ],
+ appearance: {
+ color: "#252525",
+ },
+ tags: ["ai", "llm", "gpt", "generative ai", "openai", "azure", "prompt"],
+ function: async ({ cognigy, config, childConfigs, nodeId }) => {
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u;
+ const { api, input, flowReferenceId } = cognigy;
+ const { temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, streamStopTokens, streamStopTokenOverrides, debugLogTokenCount, debugLogRequestAndCompletion, debugLogToolDefinitions, llmProviderReferenceId, usePromptMode, chatTranscriptSteps, responseFormat, streamStoreCopyInInput, seed, immediateOutput, customModelOptions, customRequestOptions, errorHandling = "continue", // default behavior for LLM Prompt node was, continue its execution even though an error occurred (deviating it from the SEO node) & do not output an error message on UI explicitly. However, error is always stored in the input or context object. We can use an extra "say" node to output it.
+ errorHandlingGotoTarget, errorMessage, logErrorToSystem, processImages, transcriptImageHandling, toolChoice, useStrict } = config;
+ let prompt = config.prompt || "";
+ const { traceId } = input;
+ // check if custom variables are used and if they have a length modifier
+ // works only for a single variable per prompt
+ if (prompt.includes("@cognigyRecentConversation")) {
+ let turnLimit;
+ if (prompt.match(/@cognigyRecentConversation:(\d+)/)) {
+ // @cognigyRecentConversation has a length modifier (e.g. @cognigyRecentConversation:5), so we just want to return the top 5 turns
+ turnLimit = Number((_a = prompt.match(/@cognigyRecentConversation:(\d+)/)) === null || _a === void 0 ? void 0 : _a[1]);
+ }
+ const recentConversation = (0, prompt_1.createLastConverationString)(cognigy.lastConversationEntries, turnLimit) + "\n";
+ prompt = prompt.replace(/@cognigyRecentConversation(:\d+)?/, recentConversation);
+ }
+ if (prompt.includes("@cognigyRecentUserInputs")) {
+ let turnLimit;
+ if (prompt.match(/@cognigyRecentUserInputs:(\d+)/)) {
+ // @cognigyRecentUserInputs has a length modifier (e.g. @cognigyRecentUserInputs:5), so we just want to return the top 5 entries
+ turnLimit = Number((_b = prompt.match(/@cognigyRecentUserInputs:(\d+)/)) === null || _b === void 0 ? void 0 : _b[1]);
+ }
+ const recentUserInputs = (0, prompt_1.createLastUserInputString)(cognigy.lastConversationEntries, turnLimit) + "\n";
+ prompt = prompt.replace(/@cognigyRecentUserInputs(:\d+)?/, recentUserInputs);
+ }
+ // handle errors from external services, depending on the settings
+ const handleServiceError = async (error) => {
+ var _a, _b, _c, _d, _e, _f;
+ const compactError = {
+ name: error === null || error === void 0 ? void 0 : error.name,
+ code: error === null || error === void 0 ? void 0 : error.code,
+ message: (error === null || error === void 0 ? void 0 : error.message) || error
+ };
+ // return the requestId if it exist in the error obj.
+ if ((_a = error === null || error === void 0 ? void 0 : error.meta) === null || _a === void 0 ? void 0 : _a.requestId) {
+ compactError["requestId"] = (_b = error === null || error === void 0 ? void 0 : error.meta) === null || _b === void 0 ? void 0 : _b.requestId;
+ }
+ if ((_c = error === null || error === void 0 ? void 0 : error.originalErrorDetails) === null || _c === void 0 ? void 0 : _c.code) {
+ compactError.code = error.originalErrorDetails.code;
+ }
+ const errorResponse = {
+ error: compactError,
+ };
+ // add error to context or input
+ switch (storeLocation) {
+ case "context":
+ (_d = api.addToContext) === null || _d === void 0 ? void 0 : _d.call(api, contextKey, errorResponse, "simple");
+ break;
+ default:
+ api.addToInput(inputKey, errorResponse);
+ }
+ if (errorHandling === "continue") {
+ // output the timeout message
+ if (errorMessage) {
+ await ((_e = api.output) === null || _e === void 0 ? void 0 : _e.call(api, errorMessage, null));
+ }
+ // Continue with default node as next node
+ const defaultChild = childConfigs === null || childConfigs === void 0 ? void 0 : childConfigs.find(child => child.type === "llmPromptDefault");
+ if (defaultChild) {
+ api.setNextNode(defaultChild.id);
+ }
+ }
+ else if (errorHandling === "goto") {
+ if (!errorHandlingGotoTarget) {
+ throw new Error("GoTo Target is required");
+ }
+ const gotoParams = {
+ cognigy,
+ childConfigs: [],
+ nodeId,
+ config: {
+ flowNode: {
+ flow: errorHandlingGotoTarget.flow,
+ node: errorHandlingGotoTarget.node,
+ },
+ injectedText: input.text,
+ injectedData: input.data,
+ executionMode: "continue",
+ absorbContext: false
+ }
+ };
+ await ((_f = logic_1.GO_TO.function) === null || _f === void 0 ? void 0 : _f.call(logic_1.GO_TO, gotoParams));
+ }
+ else {
+ throw new errors_1.InternalServerError(error === null || error === void 0 ? void 0 : error.message, { traceId });
+ }
+ };
+ try {
+ const isStreamingChannel = input.channel === "webchat3" || input.channel === "adminconsole";
+ const _messageId = (0, crypto_1.randomUUID)();
+ /**
+ * Retrieve the tool definitions from the child nodes
+ */
+ const { toolIds, toolNames, toolMap, tools } = await (0, createToolDefinitions_1.createToolDefinitions)(childConfigs, api, useStrict);
+ /**
+ * Generate Prompt Options
+ */
+ const llmPromptOptions = Object.assign(Object.assign(Object.assign({ prompt,
+ temperature,
+ maxTokens,
+ topP,
+ presencePenalty,
+ frequencyPenalty, timeoutInMs: timeout, useCase: "promptNode", stream: storeLocation === "stream", streamOnDataHandler: (text) => {
+ var _a;
+ text = isStreamingChannel ? text : text.trim();
+ if (text) {
+ // if we got text, we output it, but prevent it from being added to the transcript
+ (_a = api.output) === null || _a === void 0 ? void 0 : _a.call(api, text, {
+ _cognigy: {
+ _preventTranscript: true,
+ _messageId,
+ }
+ });
+ }
+ }, streamStopTokens: streamStopTokens || [".", "!", "?", "\\n"], streamStopTokenOverrides, preventNewLineRemoval: isStreamingChannel ? true : false,
+ // set to true in order to get token usage
+ detailedResults: true, seed: Number(seed) ? Number(seed) : undefined }, (tools.length > 0 && { tools })), (tools.length > 0 && { toolChoice: toolChoice })), { customModelOptions,
+ customRequestOptions });
+ if (useStop) {
+ llmPromptOptions["stop"] = stop;
+ }
+ // llmProviderReferenceId `default` value is not a responseFormat, rather it is LLM Model default selection.
+ if (llmProviderReferenceId && llmProviderReferenceId !== "default") {
+ llmPromptOptions["llmProviderReferenceId"] = llmProviderReferenceId;
+ }
+ if (processImages) {
+ llmPromptOptions["imageOptions"] = {
+ processImages,
+ transcriptImageHandling
+ };
+ }
+ if (responseFormat && responseFormat !== "default") {
+ llmPromptOptions["responseFormat"] = responseFormat;
+ }
+ let debugPrompt = prompt;
+ // if we're not using prompt mode, we need to add the system message and the transcript
+ // this is the equivalent of the old "useChat" mode
+ if (!usePromptMode) {
+ const transcript = await api.getTranscript({
+ limit: chatTranscriptSteps || 50,
+ rolesWhiteList: [transcripts_1.TranscriptRole.USER, transcripts_1.TranscriptRole.ASSISTANT, transcripts_1.TranscriptRole.TOOL],
+ excludeDataOnlyMessagesFilter: [transcripts_1.TranscriptRole.ASSISTANT]
+ });
+ llmPromptOptions["transcript"] = transcript;
+ llmPromptOptions["chat"] = [{
+ role: "system",
+ content: prompt
+ }];
+ }
+ // Run the LLM Query
+ const fullLlmResult = await api.runGenerativeAIPrompt(llmPromptOptions, "gptPromptNode");
+ const { messages } = fullLlmResult, llmResult = __rest(fullLlmResult, ["messages"]);
+ const isFollowSessionActive = api.getMetadata().isFollowSessionActive;
+ if (debugLogToolDefinitions) {
+ (_c = api.logDebugMessage) === null || _c === void 0 ? void 0 : _c.call(api, tools, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_DEFINITIONS");
+ }
+ // if we're in adminconsole or following a session, process debugging options
+ (input.endpointType === "adminconsole" || isFollowSessionActive) && (0, prompt_1.writeLLMDebugLogs)("LLM Prompt", debugPrompt, llmResult, debugLogTokenCount, debugLogRequestAndCompletion, cognigy);
+ if (llmResult.finishReason === "tool_calls" && llmResult.toolCalls.length > 0) {
+ const mainToolCall = llmResult.toolCalls[0];
+ let isMcpToolCall = false;
+ // Find the child node with the toolId of the tool call
+ let toolChild = childConfigs.find(child => { var _a, _b; return child.type === "llmPromptTool" && ((_a = child.config) === null || _a === void 0 ? void 0 : _a.toolId) && api.parseCognigyScriptText((_b = child.config) === null || _b === void 0 ? void 0 : _b.toolId) === mainToolCall.function.name; });
+ if (!toolChild && toolMap.has(mainToolCall.function.name)) {
+ // If the tool call is from an MCP tool, set the next node to the corresponding child node
+ toolChild = childConfigs.find(child => child.id === toolMap.get(mainToolCall.function.name));
+ isMcpToolCall = true;
+ }
+ if (mainToolCall.function.name !== "retrieve_knowledge" && toolChild === undefined) {
+ (_d = api.logDebugError) === null || _d === void 0 ? void 0 : _d.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
+ }
+ // Add last tool call to session state for loading it from Tool Answer Node
+ api.updateSessionStateValues({
+ lastToolCall: Object.assign(Object.assign({ llmProvider: fullLlmResult.llmProvider, aiAgentJobNode: {
+ flow: flowReferenceId,
+ node: nodeId,
+ } }, (isMcpToolCall && {
+ mcpServerUrl: (_e = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _e === void 0 ? void 0 : _e.mcpServerUrl,
+ timeout: (_f = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _f === void 0 ? void 0 : _f.timeout,
+ mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
+ })), { toolCall: mainToolCall }),
+ });
+ // if there are any parameters/arguments, add them to the input slots
+ if (mainToolCall.function.arguments) {
+ input.llmPrompt = Object.assign(Object.assign({}, input.llmPrompt), { toolArgs: Object.assign(Object.assign({}, (_h = (_g = input.llmPrompt) === null || _g === void 0 ? void 0 : _g.toolArgs) !== null && _h !== void 0 ? _h : {}), mainToolCall.function.arguments) });
+ }
+ // Debug Message for Tool Calls, configured in the Tool Node
+ if ((_j = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _j === void 0 ? void 0 : _j.debugMessage) {
+ const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${api.parseCognigyScriptText(toolChild.config.toolId)}`];
+ // Arguments / Parameters Slots
+ const slots = ((_k = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _k === void 0 ? void 0 : _k.arguments) && Object.keys(mainToolCall.function.arguments);
+ const hasSlots = slots && slots.length > 0;
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
+ if (hasSlots) {
+ slots.forEach(slot => {
+ let slotValueAsString = mainToolCall.function.arguments[slot];
+ if (typeof slotValueAsString === "object" && slotValueAsString !== null) {
+ slotValueAsString = JSON.stringify(slotValueAsString, null, 2);
+ }
+ else {
+ String(slotValueAsString);
+ }
+ messageLines.push(`- ${slot}: ${slotValueAsString}`);
+ });
+ }
+ (_l = api.logDebugMessage) === null || _l === void 0 ? void 0 : _l.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
+ }
+ if (toolChild) {
+ api.setNextNode(toolChild.id);
+ }
+ else {
+ const defaultChild = childConfigs === null || childConfigs === void 0 ? void 0 : childConfigs.find(child => child.type === "llmPromptDefault");
+ if (defaultChild) {
+ api.setNextNode(defaultChild.id);
+ }
+ }
+ }
+ else {
+ // Default case
+ const defaultChild = childConfigs === null || childConfigs === void 0 ? void 0 : childConfigs.find(child => child.type === "llmPromptDefault");
+ if (defaultChild) {
+ api.setNextNode(defaultChild.id);
+ }
+ }
+ // Optionally output the result immediately
+ // This will also store it into the output
+ if (llmResult.result && immediateOutput && !llmPromptOptions.stream) {
+ // we stringify objects (e.g. results coming from JSON Mode)
+ // so that the transcript only contains text
+ const resultToOutput = typeof ((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult) === "object" ? JSON.stringify((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult, undefined, 2) : (llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult;
+ await ((_m = api.output) === null || _m === void 0 ? void 0 : _m.call(api, resultToOutput, {}));
+ }
+ else if (llmResult.finishReason && llmPromptOptions.stream) {
+ // send the finishReason as last output for a stream
+ (_o = api.output) === null || _o === void 0 ? void 0 : _o.call(api, "", {
+ _cognigy: {
+ _preventTranscript: true,
+ _messageId,
+ _finishReason: llmResult.finishReason,
+ }
+ });
+ }
+ // If we are streaming and we got a result, also store it into the transcript, since streamed chunks are not stored there
+ if (llmResult.result && llmPromptOptions.stream) {
+ const transcriptContent = {
+ role: transcripts_1.TranscriptRole.ASSISTANT,
+ type: transcripts_1.TranscriptEntryType.OUTPUT,
+ source: "assistant",
+ payload: {
+ text: ((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult),
+ data: {},
+ }
+ };
+ await api.addTranscriptStep(transcriptContent);
+ }
+ // Add response to Cognigy Input/Context for further usage
+ if (storeLocation === "context") {
+ (_p = api.addToContext) === null || _p === void 0 ? void 0 : _p.call(api, contextKey, llmResult, "simple");
+ }
+ else if (storeLocation === "input") {
+ api.addToInput(inputKey, llmResult);
+ }
+ else if (storeLocation === "stream" && streamStoreCopyInInput) {
+ api.addToInput(inputKey, llmResult);
+ }
+ }
+ catch (error) {
+ const errorDetailsBase = {
+ name: error === null || error === void 0 ? void 0 : error.name,
+ code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
+ message: (error === null || error === void 0 ? void 0 : error.message) || ((_q = error.originalErrorDetails) === null || _q === void 0 ? void 0 : _q.message),
+ };
+ const errorDetails = Object.assign(Object.assign({}, errorDetailsBase), { originalErrorDetails: error === null || error === void 0 ? void 0 : error.originalErrorDetails });
+ // return the requestId if it exist in the error obj.
+ if ((_r = error.meta) === null || _r === void 0 ? void 0 : _r.requestId) {
+ errorDetails["meta"] = {
+ requestId: (_s = error.meta) === null || _s === void 0 ? void 0 : _s.requestId
+ };
+ }
+ if (logErrorToSystem) {
+ (_t = api.log) === null || _t === void 0 ? void 0 : _t.call(api, "error", JSON.stringify(errorDetailsBase));
+ }
+ (_u = api.logDebugError) === null || _u === void 0 ? void 0 : _u.call(api, errorDetailsBase, "UI__DEBUG_MODE__LLM_PROMPT__ERROR");
+ await handleServiceError(errorDetails);
+ return;
+ }
+ }
+ });
+ //# sourceMappingURL=LLMPromptV2.js.map
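A note on the field definitions above: each field's `condition` is a plain { key, value } predicate that can be nested with `and` / `or` arrays. A minimal, illustrative sketch of how such a predicate could be evaluated against a node's config; this helper is hypothetical and not part of @cognigy/rest-api-client:

// Hypothetical helper, not part of the package: evaluates the { key, value } /
// { and: [...] } / { or: [...] } condition objects used by the field definitions above.
function evaluateCondition(condition, config) {
    if (!condition) return true;
    if (Array.isArray(condition.and)) return condition.and.every((c) => evaluateCondition(c, config));
    if (Array.isArray(condition.or)) return condition.or.some((c) => evaluateCondition(c, config));
    return config[condition.key] === condition.value;
}

// Example: the "inputKey" field above is shown when results are stored in the input,
// or when streaming with a copy kept in the input.
const inputKeyCondition = {
    or: [
        { key: "storeLocation", value: "input" },
        { and: [{ key: "storeLocation", value: "stream" }, { key: "streamStoreCopyInInput", value: true }] }
    ]
};
console.log(evaluateCondition(inputKeyCondition, { storeLocation: "stream", streamStoreCopyInInput: true })); // true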