@cognigy/rest-api-client 2025.11.0 → 2025.13.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (103)
  1. package/CHANGELOG.md +10 -0
  2. package/build/RestAPIClient.js +7 -0
  3. package/build/apigroups/ResourcesAPIGroup_2_0.js +4 -0
  4. package/build/apigroups/SimulationAPIGroup_2_0.js +58 -0
  5. package/build/apigroups/index.js +3 -1
  6. package/build/shared/charts/descriptors/analytics/trackGoal.js +3 -1
  7. package/build/shared/charts/descriptors/index.js +7 -1
  8. package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +4 -2
  9. package/build/shared/charts/descriptors/message/question/question.js +24 -5
  10. package/build/shared/charts/descriptors/service/GPTPrompt.js +15 -1
  11. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +32 -173
  12. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobTool.js +2 -2
  13. package/build/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +175 -0
  14. package/build/shared/charts/descriptors/service/aiAgent/loadAiAgent.js +194 -0
  15. package/build/shared/charts/descriptors/service/aiOpsCenterConnection.js +12 -0
  16. package/build/shared/charts/descriptors/service/handoverV2.js +1 -1
  17. package/build/shared/charts/descriptors/service/index.js +13 -1
  18. package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +922 -0
  19. package/build/shared/charts/descriptors/service/llmPrompt/llmPromptDefault.js +31 -0
  20. package/build/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +196 -0
  21. package/build/shared/charts/descriptors/service/llmPrompt/llmPromptTool.js +139 -0
  22. package/build/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +2 -2
  23. package/build/shared/constants.js +1 -5
  24. package/build/shared/interfaces/debugEvents/IGoalCompletedEventPayload.js +3 -0
  25. package/build/shared/interfaces/debugEvents/TDebugEventType.js +1 -0
  26. package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +12 -1
  27. package/build/shared/interfaces/resources/IAuditEvent.js +3 -0
  28. package/build/shared/interfaces/resources/ILargeLanguageModel.js +1 -0
  29. package/build/shared/interfaces/resources/TResourceType.js +2 -0
  30. package/build/shared/interfaces/resources/TRestChannelType.js +5 -0
  31. package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeChunk.js +2 -1
  32. package/build/shared/interfaces/resources/settings/IGenerativeAISettings.js +5 -18
  33. package/build/shared/interfaces/restAPI/operations/generateOutput/v2.0/index.js +3 -0
  34. package/build/shared/interfaces/restAPI/opsCenter/observationConfig/IOpsCenterObservationConfig.js +1 -1
  35. package/build/shared/interfaces/restAPI/simulation/simulation/ICreateSimulationRest_2_0.js +3 -0
  36. package/build/shared/interfaces/restAPI/simulation/simulation/IDeleteSimulationRest_2_0.js +3 -0
  37. package/build/shared/interfaces/restAPI/simulation/simulation/IIndexSimulationRest_2_0.js +3 -0
  38. package/build/shared/interfaces/restAPI/simulation/simulation/IReadSimulationRest_2_0.js +3 -0
  39. package/build/shared/interfaces/restAPI/simulation/simulation/IScheduleSimulationRest_2_0.js +3 -0
  40. package/build/shared/interfaces/restAPI/simulation/simulation/ISimulationRest_2_0.js +9 -0
  41. package/build/shared/interfaces/restAPI/simulation/simulation/IUpdateSimulationRest_2_0.js +3 -0
  42. package/build/shared/interfaces/restAPI/simulation/simulationRun/IIndexSimulationRunRest_2_0.js +3 -0
  43. package/build/shared/interfaces/restAPI/simulation/simulationRun/IReadSimulationRunRest_2_0.js +3 -0
  44. package/build/shared/interfaces/restAPI/simulation/simulationRun/ISimulationRunRest_2_0.js +19 -0
  45. package/build/shared/interfaces/restAPI/simulation/simulationRunBatch/IGetAllSimulationRunBatchRest_2_0.js +3 -0
  46. package/build/shared/interfaces/restAPI/simulation/simulationRunBatch/IIndexSimulationRunBatchRest_2_0.js +3 -0
  47. package/build/shared/interfaces/restAPI/simulation/simulationRunBatch/IReadSimulationRunBatchRest_2_0.js +3 -0
  48. package/build/shared/interfaces/restAPI/simulation/simulationRunBatch/ISimulationRunBatchRest_2_0.js +9 -0
  49. package/build/shared/interfaces/security/IPermission.js +2 -0
  50. package/build/shared/interfaces/security/IRole.js +3 -1
  51. package/build/shared/interfaces/security/index.js +1 -1
  52. package/dist/esm/RestAPIClient.js +7 -0
  53. package/dist/esm/apigroups/ResourcesAPIGroup_2_0.js +4 -0
  54. package/dist/esm/apigroups/SimulationAPIGroup_2_0.js +44 -0
  55. package/dist/esm/apigroups/index.js +1 -0
  56. package/dist/esm/shared/charts/descriptors/analytics/trackGoal.js +3 -1
  57. package/dist/esm/shared/charts/descriptors/index.js +8 -2
  58. package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +4 -2
  59. package/dist/esm/shared/charts/descriptors/message/question/question.js +24 -5
  60. package/dist/esm/shared/charts/descriptors/service/GPTPrompt.js +15 -1
  61. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +33 -174
  62. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobTool.js +2 -2
  63. package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +172 -0
  64. package/dist/esm/shared/charts/descriptors/service/aiAgent/loadAiAgent.js +192 -0
  65. package/dist/esm/shared/charts/descriptors/service/aiOpsCenterConnection.js +9 -0
  66. package/dist/esm/shared/charts/descriptors/service/handoverV2.js +1 -1
  67. package/dist/esm/shared/charts/descriptors/service/index.js +6 -0
  68. package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +909 -0
  69. package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptDefault.js +28 -0
  70. package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +193 -0
  71. package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptTool.js +136 -0
  72. package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +2 -2
  73. package/dist/esm/shared/constants.js +1 -5
  74. package/dist/esm/shared/interfaces/debugEvents/IGoalCompletedEventPayload.js +2 -0
  75. package/dist/esm/shared/interfaces/debugEvents/TDebugEventType.js +1 -0
  76. package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +12 -1
  77. package/dist/esm/shared/interfaces/resources/IAuditEvent.js +3 -0
  78. package/dist/esm/shared/interfaces/resources/ILargeLanguageModel.js +1 -0
  79. package/dist/esm/shared/interfaces/resources/TResourceType.js +2 -0
  80. package/dist/esm/shared/interfaces/resources/TRestChannelType.js +5 -0
  81. package/dist/esm/shared/interfaces/resources/knowledgeStore/IKnowledgeChunk.js +2 -1
  82. package/dist/esm/shared/interfaces/resources/settings/IGenerativeAISettings.js +4 -17
  83. package/dist/esm/shared/interfaces/restAPI/operations/generateOutput/v2.0/index.js +2 -0
  84. package/dist/esm/shared/interfaces/restAPI/opsCenter/observationConfig/IOpsCenterObservationConfig.js +1 -1
  85. package/dist/esm/shared/interfaces/restAPI/simulation/simulation/ICreateSimulationRest_2_0.js +2 -0
  86. package/dist/esm/shared/interfaces/restAPI/simulation/simulation/IDeleteSimulationRest_2_0.js +2 -0
  87. package/dist/esm/shared/interfaces/restAPI/simulation/simulation/IIndexSimulationRest_2_0.js +2 -0
  88. package/dist/esm/shared/interfaces/restAPI/simulation/simulation/IReadSimulationRest_2_0.js +2 -0
  89. package/dist/esm/shared/interfaces/restAPI/simulation/simulation/IScheduleSimulationRest_2_0.js +2 -0
  90. package/dist/esm/shared/interfaces/restAPI/simulation/simulation/ISimulationRest_2_0.js +6 -0
  91. package/dist/esm/shared/interfaces/restAPI/simulation/simulation/IUpdateSimulationRest_2_0.js +2 -0
  92. package/dist/esm/shared/interfaces/restAPI/simulation/simulationRun/IIndexSimulationRunRest_2_0.js +2 -0
  93. package/dist/esm/shared/interfaces/restAPI/simulation/simulationRun/IReadSimulationRunRest_2_0.js +2 -0
  94. package/dist/esm/shared/interfaces/restAPI/simulation/simulationRun/ISimulationRunRest_2_0.js +16 -0
  95. package/dist/esm/shared/interfaces/restAPI/simulation/simulationRunBatch/IGetAllSimulationRunBatchRest_2_0.js +2 -0
  96. package/dist/esm/shared/interfaces/restAPI/simulation/simulationRunBatch/IIndexSimulationRunBatchRest_2_0.js +2 -0
  97. package/dist/esm/shared/interfaces/restAPI/simulation/simulationRunBatch/IReadSimulationRunBatchRest_2_0.js +2 -0
  98. package/dist/esm/shared/interfaces/restAPI/simulation/simulationRunBatch/ISimulationRunBatchRest_2_0.js +6 -0
  99. package/dist/esm/shared/interfaces/security/IPermission.js +2 -0
  100. package/dist/esm/shared/interfaces/security/IRole.js +3 -1
  101. package/dist/esm/shared/interfaces/security/index.js +1 -1
  102. package/package.json +1 -1
  103. package/types/index.d.ts +394 -45
package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js
@@ -0,0 +1,909 @@
+ import { __awaiter, __rest } from "tslib";
+ /* Custom modules */
+ import { createNodeDescriptor } from "../../../createNodeDescriptor";
+ import { GO_TO } from "../../logic";
+ import { randomUUID } from 'crypto';
+ import { createToolDefinitions } from "../aiAgent/helpers/createToolDefinitions";
+ import { createLastConverationString, createLastUserInputString, writeLLMDebugLogs } from "../../nlu/generativeSlotFiller/prompt";
+ import { InternalServerError } from "../../../../errors";
+ import { TranscriptEntryType, TranscriptRole } from "../../../../interfaces/transcripts/transcripts";
+ export const LLM_PROMPT_V2 = createNodeDescriptor({
+ type: "llmPromptV2",
+ defaultLabel: "LLM Prompt",
+ summary: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__DESCRIPTION",
+ constraints: {
+ collapsable: true,
+ placement: {
+ children: {
+ whitelist: ["llmPromptDefault", "llmPromptTool", "llmPromptMCPTool"],
+ },
+ },
+ },
+ behavior: {
+ entrypoint: true
+ },
+ dependencies: {
+ children: ["llmPromptDefault", "llmPromptTool"]
+ },
+ preview: {
+ type: "text",
+ key: "prompt",
+ },
+ fields: [
+ {
+ key: "llmProviderReferenceId",
+ type: "llmSelect",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__LLM_SELECT__LABEL",
+ defaultValue: "default",
+ params: {
+ required: true
+ }
+ },
+ {
+ key: "prompt",
+ label: "UI__NODE_EDITOR__SERVICE__LLM_PROMPT__FIELDS__PROMPT__LABEL",
+ type: "cognigyLLMText",
+ description: "UI__NODE_EDITOR__SERVICE__LLM_PROMPT__FIELDS__PROMPT__DESCRIPTION",
+ params: {
+ multiline: true,
+ rows: 5,
+ required: false
+ },
+ defaultValue: ""
+ },
+ {
+ key: "chatTranscriptSteps",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TRANSCRIPT_STEPS__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TRANSCRIPT_STEPS__DESCRIPTION",
+ defaultValue: 50,
+ params: {
+ min: 0,
+ max: 50,
+ step: 1
+ },
+ condition: {
+ key: "usePromptMode",
+ value: false,
+ }
+ },
+ {
+ key: "usePromptMode",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__USE_PROMPT__LABEL",
+ type: "toggle",
+ params: {
+ required: true
+ },
+ defaultValue: false
+ },
+ {
+ key: "samplingMethod",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__SAMPLING_METHOD__LABEL",
+ type: "select",
+ defaultValue: "temperature",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__SAMPLING_METHOD__OPTIONS__TEMPERATURE__LABEL",
+ value: "temperature"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__SAMPLING_METHOD__OPTIONS__TOP_PERCENTATGE__LABEL",
+ value: "topP"
+ }
+ ]
+ }
+ },
+ {
+ key: "temperature",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TEMPERATURE__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TEMPERATURE__DESCRIPTION",
+ defaultValue: 0.7,
+ params: {
+ min: 0,
+ max: 1,
+ step: 0.1
+ },
+ condition: {
+ key: "samplingMethod",
+ value: "temperature",
+ }
+ },
+ {
+ key: "topP",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TOP_P__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TOP_P__DESCRIPTION",
+ defaultValue: 1,
+ params: {
+ min: 0,
+ max: 1,
+ step: 0.1
+ },
+ condition: {
+ key: "samplingMethod",
+ value: "topP",
+ }
+ },
+ {
+ key: "maxTokens",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__MAX_TOKENS__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__MAX_TOKENS__DESCRIPTION",
+ defaultValue: 1000,
+ params: {
+ min: 100,
+ max: 16000,
+ step: 100
+ }
+ },
+ {
+ key: "frequencyPenalty",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__FREQUENCY_PENALTY__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__FREQUENCY_PENALTY__DESCRIPTION",
+ defaultValue: 0,
+ params: {
+ min: -2,
+ max: 2,
+ step: 0.1
+ }
+ },
+ {
+ key: "presencePenalty",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__PRESENCE_PENALTY__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__PRESENCE_PENALTY__DESCRIPTION",
+ defaultValue: 0,
+ params: {
+ min: -2,
+ max: 2,
+ step: 0.1
+ }
+ },
+ {
+ key: "useStop",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__USE_STOP__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__USE_STOP__DESCRIPTION",
+ defaultValue: false
+ },
+ {
+ key: "stop",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STOP__LABEL",
+ type: "textArray",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STOP__DESCRIPTION",
+ condition: {
+ key: "useStop",
+ value: true
+ }
+ },
+ {
+ key: "timeout",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TIMEOUT__LABEL",
+ defaultValue: 8000,
+ type: "number",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TIMEOUT__DESCRIPTION",
+ },
+ {
+ key: "storeLocation",
+ type: "select",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__DESCRIPTION",
+ defaultValue: "stream",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__OPTIONS__INPUT__LABEL",
+ value: "input"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__OPTIONS__CONTEXT__LABEL",
+ value: "context"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STORE_LOCATION__OPTIONS__STREAM__LABEL",
+ value: "stream"
+ }
+ ],
+ required: true
+ },
+ },
+ {
+ key: "immediateOutput",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__IMMEDIATEOUTPUT__LABEL",
+ type: "toggle",
+ defaultValue: true,
+ condition: {
+ or: [
+ {
+ key: "storeLocation",
+ value: "input",
+ },
+ {
+ key: "storeLocation",
+ value: "context",
+ }
+ ]
+ }
+ },
+ {
+ key: "inputKey",
+ type: "cognigyText",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__INPUT_KEY__LABEL",
+ defaultValue: "promptResult",
+ condition: {
+ or: [
+ {
+ key: "storeLocation",
+ value: "input",
+ },
+ {
+ and: [
+ {
+ key: "storeLocation",
+ value: "stream",
+ },
+ {
+ key: "streamStoreCopyInInput",
+ value: true,
+ }
+ ]
+ }
+ ]
+ }
+ },
+ {
+ key: "contextKey",
+ type: "cognigyText",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CONTEXT_KEY__LABEL",
+ defaultValue: "promptResult",
+ condition: {
+ key: "storeLocation",
+ value: "context",
+ }
+ },
+ {
+ key: "streamStopTokens",
+ type: "textArray",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STOP_TOKENS__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STOP_TOKENS__DESCRIPTION",
+ defaultValue: [".", "!", "?", "\\n"],
+ condition: {
+ key: "storeLocation",
+ value: "stream",
+ }
+ },
+ {
+ key: "streamStopTokenOverrides",
+ type: "textArray",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STOP_TOKEN_OVERRIDES__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STOP_TOKEN_OVERRIDES__DESCRIPTION",
+ defaultValue: ["\d+\."],
+ condition: {
+ key: "storeLocation",
+ value: "stream",
+ }
+ },
+ {
+ key: "streamDescription",
+ type: "description",
+ label: " ",
+ params: {
+ text: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_DESCRIPTION__TEXT"
+ },
+ condition: {
+ key: "storeLocation",
+ value: "stream",
+ }
+ },
+ {
+ key: "streamStoreCopyInInput",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STORE_COPY__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STORE_COPY__DESCRIPTION",
+ defaultValue: false,
+ condition: {
+ key: "storeLocation",
+ value: "stream",
+ }
+ },
+ {
+ key: "debugLogTokenCount",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUGLOGTOKENCOUNT__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUGLOGTOKENCOUNT__DESCRIPTION",
+ defaultValue: false
+ },
+ {
+ key: "debugLogRequestAndCompletion",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUGLOGREQUESTANDCOMPLETION__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUGLOGREQUESTANDCOMPLETION__DESCRIPTION",
+ defaultValue: false
+ },
+ {
+ key: "debugDescription",
+ type: "description",
+ label: " ",
+ params: {
+ text: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUG_DESCRIPTION__TEXT"
+ }
+ },
+ {
+ key: "responseFormat",
+ type: "select",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__RESPONSE_FORMAT__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__RESPONSE_FORMAT__DESCRIPTION",
+ defaultValue: "default",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__RESPONSE_FORMAT__OPTIONS__DEFAULT__LABEL",
+ value: "default"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__RESPONSE_FORMAT__OPTIONS__TEXT__LABEL",
+ value: "text"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__RESPONSE_FORMAT__OPTIONS__JSON__LABEL",
+ value: "json_object"
+ }
+ ]
+ },
+ },
+ {
+ key: "seed",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__SEED__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__SEED__DESCRIPTION",
+ type: "cognigyText",
+ defaultValue: ""
+ },
+ {
+ key: "jsonStreamWarning",
+ type: "description",
+ label: " ",
+ params: {
+ text: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__JSONSTREAMWARNING__PARAM"
+ },
+ condition: {
+ and: [
+ {
+ key: "responseFormat",
+ value: "json_object"
+ },
+ {
+ key: "storeLocation",
+ value: "stream",
+ }
+ ]
+ }
+ },
+ {
+ key: "customModelOptions",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_MODEL_OPTIONS__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_MODEL_OPTIONS__DESCRIPTION",
+ type: "json",
+ defaultValue: {}
+ },
+ {
+ key: "customRequestOptions",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_REQUEST_OPTIONS__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_REQUEST_OPTIONS__DESCRIPTION",
+ type: "json",
+ defaultValue: {}
+ },
+ {
+ key: "logErrorToSystem",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__LOG_ERROR_TO_SYSTEM__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__LOG_ERROR_TO_SYSTEM__DESCRIPTION",
+ type: "toggle",
+ defaultValue: false,
+ },
+ {
+ key: "errorHandling",
+ type: "select",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__HANDLE_SERVICE_ERROR__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__HANDLE_SERVICE_ERROR__DESCRIPTION",
+ defaultValue: "continue",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__HANDLE_SERVICE_ERROR__OPTIONS__STOP__LABEL",
+ value: "stop"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__HANDLE_SERVICE_ERROR__OPTIONS__CONTINUE__LABEL",
+ value: "continue"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__HANDLE_SERVICE_ERROR__OPTIONS__GOTO__LABEL",
+ value: "goto"
+ },
+ ]
+ }
+ },
+ {
+ key: "errorMessage",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__ERROR_MESSAGE__LABEL",
+ type: "cognigyText",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__ERROR_MESSAGE__DESCRIPTION",
+ defaultValue: "",
+ condition: {
+ key: "errorHandling",
+ value: "continue"
+ }
+ },
+ {
+ key: "errorHandlingGotoTarget",
+ type: "flowNode",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__ERROR__GOTO_NODE__LABEL",
+ condition: {
+ key: "errorHandling",
+ value: "goto"
+ }
+ },
+ {
+ key: "toolChoice",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TOOL_CHOICE__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TOOL_CHOICE__DESCRIPTION",
+ type: "select",
+ defaultValue: "auto",
+ params: {
+ options: [
+ {
+ label: 'UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TOOL_CHOICE__OPTIONS__AUTO__LABEL',
+ value: 'auto'
+ },
+ {
+ label: 'UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TOOL_CHOICE__OPTIONS__REQUIRED__LABEL',
+ value: 'required'
+ },
+ {
+ label: 'UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TOOL_CHOICE__OPTIONS__NONE__LABEL',
+ value: 'none'
+ }
+ ]
+ }
+ },
+ {
+ key: "useStrict",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__USE_STRICT__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__USE_STRICT__DESCRIPTION",
+ type: "toggle",
+ defaultValue: false
+ },
+ {
+ key: "processImages",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__PROCESS_IMAGES__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__PROCESS_IMAGES__DESCRIPTION",
+ type: "toggle",
+ defaultValue: false
+ },
+ {
+ key: "transcriptImageHandling",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TRANSCRIPT_IMAGES__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TRANSCRIPT_IMAGES__DESCRIPTION",
+ type: "select",
+ defaultValue: "minify",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TRANSCRIPT_IMAGES__OPTIONS__MINIFY__LABEL",
+ value: "minify"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TRANSCRIPT_IMAGES__OPTIONS__DROP__LABEL",
+ value: "drop"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__TRANSCRIPT_IMAGES__OPTIONS__KEEP__LABEL",
+ value: "keep"
+ }
+ ],
+ },
+ },
+ {
+ key: "debugLogToolDefinitions",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_TOOL_DEFINITIONS__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_TOOL_DEFINITIONS__DESCRIPTION",
+ defaultValue: false
+ },
+ ],
+ sections: [
+ {
+ key: "advanced",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__SECTIONS__ADVANCED__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "maxTokens",
+ "usePromptMode",
+ "chatTranscriptSteps",
+ "responseFormat",
+ "jsonStreamWarning",
+ "timeout",
+ "samplingMethod",
+ "temperature",
+ "topP",
+ "presencePenalty",
+ "frequencyPenalty",
+ "useStop",
+ "stop",
+ "seed"
+ ]
+ },
+ {
+ key: "storage",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__SECTIONS__STORAGE__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "storeLocation",
+ "jsonStreamWarning",
+ "streamDescription",
+ "inputKey",
+ "contextKey",
+ "immediateOutput",
+ "streamStopTokens",
+ "streamStopTokenOverrides",
+ "streamStoreCopyInInput"
+ ]
+ },
+ {
+ key: "errorHandling",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__SECTIONS__ERROR_HANDLING__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "logErrorToSystem",
+ "errorHandling",
+ "errorMessage",
+ "errorHandlingGotoTarget",
+ ]
+ },
+ {
+ key: "customOptions",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__SECTIONS__CUSTOM_OPTIONS__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "customModelOptions",
+ "customRequestOptions"
+ ]
+ },
+ {
+ key: "debugging",
+ label: "UI__NODE_EDITOR__SECTIONS__DEBUG_SETTINGS__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "debugDescription",
+ "debugLogTokenCount",
+ "debugLogRequestAndCompletion",
+ "debugLogToolDefinitions"
+ ]
+ },
+ {
+ key: "toolSettings",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__TOOL_SETTINGS__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "toolChoice",
+ "useStrict",
+ ],
+ },
+ {
+ key: "imageHandling",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__IMAGE_HANDLING__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "processImages",
+ "transcriptImageHandling"
+ ]
+ },
+ ],
+ form: [
+ { type: "field", key: "llmProviderReferenceId" },
+ { type: "field", key: "prompt" },
+ { type: "section", key: "advanced" },
+ { type: "section", key: "storage" },
+ { type: "section", key: "toolSettings" },
+ { type: "section", key: "imageHandling" },
+ { type: "section", key: "errorHandling" },
+ { type: "section", key: "customOptions" },
+ { type: "section", key: "debugging" },
+ ],
+ appearance: {
+ color: "#252525",
+ },
+ tags: ["ai", "llm", "gpt", "generative ai", "openai", "azure", "prompt"],
+ function: ({ cognigy, config, childConfigs, nodeId }) => __awaiter(void 0, void 0, void 0, function* () {
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u;
+ const { api, input, flowReferenceId } = cognigy;
+ const { temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, streamStopTokens, streamStopTokenOverrides, debugLogTokenCount, debugLogRequestAndCompletion, debugLogToolDefinitions, llmProviderReferenceId, usePromptMode, chatTranscriptSteps, responseFormat, streamStoreCopyInInput, seed, immediateOutput, customModelOptions, customRequestOptions, errorHandling = "continue", // default behavior for LLM Prompt node was, continue its execution even though an error occurred (deviating it from the SEO node) & do not output an error message on UI explicitly. However, error is always stored in the input or context object. We can use an extra "say" node to output it.
+ errorHandlingGotoTarget, errorMessage, logErrorToSystem, processImages, transcriptImageHandling, toolChoice, useStrict } = config;
+ let prompt = config.prompt || "";
+ const { traceId } = input;
+ // check if custom variables are used and if they have a length modifier
+ // works only for a single variable per prompt
+ if (prompt.includes("@cognigyRecentConversation")) {
+ let turnLimit;
+ if (prompt.match(/@cognigyRecentConversation:(\d+)/)) {
+ // @cognigyRecentConversation has a length modifier (e.g. @cognigyRecentConversation:5), so we just want to return the top 5 turns
+ turnLimit = Number((_a = prompt.match(/@cognigyRecentConversation:(\d+)/)) === null || _a === void 0 ? void 0 : _a[1]);
+ }
+ const recentConversation = createLastConverationString(cognigy.lastConversationEntries, turnLimit) + "\n";
+ prompt = prompt.replace(/@cognigyRecentConversation(:\d+)?/, recentConversation);
+ }
+ if (prompt.includes("@cognigyRecentUserInputs")) {
+ let turnLimit;
+ if (prompt.match(/@cognigyRecentUserInputs:(\d+)/)) {
+ // @cognigyRecentUserInputs has a length modifier (e.g. @cognigyRecentUserInputs:5), so we just want to return the top 5 entries
+ turnLimit = Number((_b = prompt.match(/@cognigyRecentUserInputs:(\d+)/)) === null || _b === void 0 ? void 0 : _b[1]);
+ }
+ const recentUserInputs = createLastUserInputString(cognigy.lastConversationEntries, turnLimit) + "\n";
+ prompt = prompt.replace(/@cognigyRecentUserInputs(:\d+)?/, recentUserInputs);
+ }
+ // handle errors from external services, depending on the settings
+ const handleServiceError = (error) => __awaiter(void 0, void 0, void 0, function* () {
+ var _v, _w, _x, _y, _z, _0;
+ const compactError = {
+ name: error === null || error === void 0 ? void 0 : error.name,
+ code: error === null || error === void 0 ? void 0 : error.code,
+ message: (error === null || error === void 0 ? void 0 : error.message) || error
+ };
+ // return the requestId if it exist in the error obj.
+ if ((_v = error === null || error === void 0 ? void 0 : error.meta) === null || _v === void 0 ? void 0 : _v.requestId) {
+ compactError["requestId"] = (_w = error === null || error === void 0 ? void 0 : error.meta) === null || _w === void 0 ? void 0 : _w.requestId;
+ }
+ if ((_x = error === null || error === void 0 ? void 0 : error.originalErrorDetails) === null || _x === void 0 ? void 0 : _x.code) {
+ compactError.code = error.originalErrorDetails.code;
+ }
+ const errorResponse = {
+ error: compactError,
+ };
+ // add error to context or input
+ switch (storeLocation) {
+ case "context":
+ (_y = api.addToContext) === null || _y === void 0 ? void 0 : _y.call(api, contextKey, errorResponse, "simple");
+ break;
+ default:
+ api.addToInput(inputKey, errorResponse);
+ }
+ if (errorHandling === "continue") {
+ // output the timeout message
+ if (errorMessage) {
+ yield ((_z = api.output) === null || _z === void 0 ? void 0 : _z.call(api, errorMessage, null));
+ }
+ // Continue with default node as next node
+ const defaultChild = childConfigs === null || childConfigs === void 0 ? void 0 : childConfigs.find(child => child.type === "llmPromptDefault");
+ if (defaultChild) {
+ api.setNextNode(defaultChild.id);
+ }
+ }
+ else if (errorHandling === "goto") {
+ if (!errorHandlingGotoTarget) {
+ throw new Error("GoTo Target is required");
+ }
+ const gotoParams = {
+ cognigy,
+ childConfigs: [],
+ nodeId,
+ config: {
+ flowNode: {
+ flow: errorHandlingGotoTarget.flow,
+ node: errorHandlingGotoTarget.node,
+ },
+ injectedText: input.text,
+ injectedData: input.data,
+ executionMode: "continue",
+ absorbContext: false
+ }
+ };
+ yield ((_0 = GO_TO.function) === null || _0 === void 0 ? void 0 : _0.call(GO_TO, gotoParams));
+ }
+ else {
+ throw new InternalServerError(error === null || error === void 0 ? void 0 : error.message, { traceId });
+ }
+ });
+ try {
+ const isStreamingChannel = input.channel === "webchat3" || input.channel === "adminconsole";
+ const _messageId = randomUUID();
+ /**
+ * Retrieve the tool definitions from the child nodes
+ */
+ const { toolIds, toolNames, toolMap, tools } = yield createToolDefinitions(childConfigs, api, useStrict);
+ /**
+ * Generate Prompt Options
+ */
+ const llmPromptOptions = Object.assign(Object.assign(Object.assign({ prompt,
+ temperature,
+ maxTokens,
+ topP,
+ presencePenalty,
+ frequencyPenalty, timeoutInMs: timeout, useCase: "promptNode", stream: storeLocation === "stream", streamOnDataHandler: (text) => {
+ var _a;
+ text = isStreamingChannel ? text : text.trim();
+ if (text) {
+ // if we got text, we output it, but prevent it from being added to the transcript
+ (_a = api.output) === null || _a === void 0 ? void 0 : _a.call(api, text, {
+ _cognigy: {
+ _preventTranscript: true,
+ _messageId,
+ }
+ });
+ }
+ }, streamStopTokens: streamStopTokens || [".", "!", "?", "\\n"], streamStopTokenOverrides, preventNewLineRemoval: isStreamingChannel ? true : false,
+ // set to true in order to get token usage
+ detailedResults: true, seed: Number(seed) ? Number(seed) : undefined }, (tools.length > 0 && { tools })), (tools.length > 0 && { toolChoice: toolChoice })), { customModelOptions,
+ customRequestOptions });
+ if (useStop) {
+ llmPromptOptions["stop"] = stop;
+ }
+ // llmProviderReferenceId `default` value is not a responseFormat, rather it is LLM Model default selection.
+ if (llmProviderReferenceId && llmProviderReferenceId !== "default") {
+ llmPromptOptions["llmProviderReferenceId"] = llmProviderReferenceId;
+ }
+ if (processImages) {
+ llmPromptOptions["imageOptions"] = {
+ processImages,
+ transcriptImageHandling
+ };
+ }
+ if (responseFormat && responseFormat !== "default") {
+ llmPromptOptions["responseFormat"] = responseFormat;
+ }
+ let debugPrompt = prompt;
+ // if we're not using prompt mode, we need to add the system message and the transcript
+ // this is the equivalent of the old "useChat" mode
+ if (!usePromptMode) {
+ const transcript = yield api.getTranscript({
+ limit: chatTranscriptSteps || 50,
+ rolesWhiteList: [TranscriptRole.USER, TranscriptRole.ASSISTANT, TranscriptRole.TOOL],
+ excludeDataOnlyMessagesFilter: [TranscriptRole.ASSISTANT]
+ });
+ llmPromptOptions["transcript"] = transcript;
+ llmPromptOptions["chat"] = [{
+ role: "system",
+ content: prompt
+ }];
+ }
+ // Run the LLM Query
+ const fullLlmResult = yield api.runGenerativeAIPrompt(llmPromptOptions, "gptPromptNode");
+ const { messages } = fullLlmResult, llmResult = __rest(fullLlmResult, ["messages"]);
+ const isFollowSessionActive = api.getMetadata().isFollowSessionActive;
+ if (debugLogToolDefinitions) {
+ (_c = api.logDebugMessage) === null || _c === void 0 ? void 0 : _c.call(api, tools, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_DEFINITIONS");
+ }
+ // if we're in adminconsole or following a session, process debugging options
+ (input.endpointType === "adminconsole" || isFollowSessionActive) && writeLLMDebugLogs("LLM Prompt", debugPrompt, llmResult, debugLogTokenCount, debugLogRequestAndCompletion, cognigy);
+ if (llmResult.finishReason === "tool_calls" && llmResult.toolCalls.length > 0) {
+ const mainToolCall = llmResult.toolCalls[0];
+ let isMcpToolCall = false;
+ // Find the child node with the toolId of the tool call
+ let toolChild = childConfigs.find(child => { var _a, _b; return child.type === "llmPromptTool" && ((_a = child.config) === null || _a === void 0 ? void 0 : _a.toolId) && api.parseCognigyScriptText((_b = child.config) === null || _b === void 0 ? void 0 : _b.toolId) === mainToolCall.function.name; });
+ if (!toolChild && toolMap.has(mainToolCall.function.name)) {
+ // If the tool call is from an MCP tool, set the next node to the corresponding child node
+ toolChild = childConfigs.find(child => child.id === toolMap.get(mainToolCall.function.name));
+ isMcpToolCall = true;
+ }
+ if (mainToolCall.function.name !== "retrieve_knowledge" && toolChild === undefined) {
+ (_d = api.logDebugError) === null || _d === void 0 ? void 0 : _d.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
+ }
+ // Add last tool call to session state for loading it from Tool Answer Node
+ api.updateSessionStateValues({
+ lastToolCall: Object.assign(Object.assign({ llmProvider: fullLlmResult.llmProvider, aiAgentJobNode: {
+ flow: flowReferenceId,
+ node: nodeId,
+ } }, (isMcpToolCall && {
+ mcpServerUrl: (_e = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _e === void 0 ? void 0 : _e.mcpServerUrl,
+ timeout: (_f = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _f === void 0 ? void 0 : _f.timeout,
+ mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
+ })), { toolCall: mainToolCall }),
+ });
+ // if there are any parameters/arguments, add them to the input slots
+ if (mainToolCall.function.arguments) {
+ input.llmPrompt = Object.assign(Object.assign({}, input.llmPrompt), { toolArgs: Object.assign(Object.assign({}, (_h = (_g = input.llmPrompt) === null || _g === void 0 ? void 0 : _g.toolArgs) !== null && _h !== void 0 ? _h : {}), mainToolCall.function.arguments) });
+ }
+ // Debug Message for Tool Calls, configured in the Tool Node
+ if ((_j = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _j === void 0 ? void 0 : _j.debugMessage) {
+ const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${api.parseCognigyScriptText(toolChild.config.toolId)}`];
+ // Arguments / Parameters Slots
+ const slots = ((_k = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _k === void 0 ? void 0 : _k.arguments) && Object.keys(mainToolCall.function.arguments);
+ const hasSlots = slots && slots.length > 0;
+ messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
+ if (hasSlots) {
+ slots.forEach(slot => {
+ let slotValueAsString = mainToolCall.function.arguments[slot];
+ if (typeof slotValueAsString === "object" && slotValueAsString !== null) {
+ slotValueAsString = JSON.stringify(slotValueAsString, null, 2);
+ }
+ else {
+ String(slotValueAsString);
+ }
+ messageLines.push(`- ${slot}: ${slotValueAsString}`);
+ });
+ }
+ (_l = api.logDebugMessage) === null || _l === void 0 ? void 0 : _l.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
+ }
+ if (toolChild) {
+ api.setNextNode(toolChild.id);
+ }
+ else {
+ const defaultChild = childConfigs === null || childConfigs === void 0 ? void 0 : childConfigs.find(child => child.type === "llmPromptDefault");
+ if (defaultChild) {
+ api.setNextNode(defaultChild.id);
+ }
+ }
+ }
+ else {
+ // Default case
+ const defaultChild = childConfigs === null || childConfigs === void 0 ? void 0 : childConfigs.find(child => child.type === "llmPromptDefault");
+ if (defaultChild) {
+ api.setNextNode(defaultChild.id);
+ }
+ }
+ // Optionally output the result immediately
+ // This will also store it into the output
+ if (llmResult.result && immediateOutput && !llmPromptOptions.stream) {
+ // we stringify objects (e.g. results coming from JSON Mode)
+ // so that the transcript only contains text
+ const resultToOutput = typeof ((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult) === "object" ? JSON.stringify((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult, undefined, 2) : (llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult;
+ yield ((_m = api.output) === null || _m === void 0 ? void 0 : _m.call(api, resultToOutput, {}));
+ }
+ else if (llmResult.finishReason && llmPromptOptions.stream) {
+ // send the finishReason as last output for a stream
+ (_o = api.output) === null || _o === void 0 ? void 0 : _o.call(api, "", {
+ _cognigy: {
+ _preventTranscript: true,
+ _messageId,
+ _finishReason: llmResult.finishReason,
+ }
+ });
+ }
+ // If we are streaming and we got a result, also store it into the transcript, since streamed chunks are not stored there
+ if (llmResult.result && llmPromptOptions.stream) {
+ const transcriptContent = {
+ role: TranscriptRole.ASSISTANT,
+ type: TranscriptEntryType.OUTPUT,
+ source: "assistant",
+ payload: {
+ text: ((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult),
+ data: {},
+ }
+ };
+ yield api.addTranscriptStep(transcriptContent);
+ }
+ // Add response to Cognigy Input/Context for further usage
+ if (storeLocation === "context") {
+ (_p = api.addToContext) === null || _p === void 0 ? void 0 : _p.call(api, contextKey, llmResult, "simple");
+ }
+ else if (storeLocation === "input") {
+ api.addToInput(inputKey, llmResult);
+ }
+ else if (storeLocation === "stream" && streamStoreCopyInInput) {
+ api.addToInput(inputKey, llmResult);
+ }
+ }
+ catch (error) {
+ const errorDetailsBase = {
+ name: error === null || error === void 0 ? void 0 : error.name,
+ code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
+ message: (error === null || error === void 0 ? void 0 : error.message) || ((_q = error.originalErrorDetails) === null || _q === void 0 ? void 0 : _q.message),
+ };
+ const errorDetails = Object.assign(Object.assign({}, errorDetailsBase), { originalErrorDetails: error === null || error === void 0 ? void 0 : error.originalErrorDetails });
+ // return the requestId if it exist in the error obj.
+ if ((_r = error.meta) === null || _r === void 0 ? void 0 : _r.requestId) {
+ errorDetails["meta"] = {
+ requestId: (_s = error.meta) === null || _s === void 0 ? void 0 : _s.requestId
+ };
+ }
+ if (logErrorToSystem) {
+ (_t = api.log) === null || _t === void 0 ? void 0 : _t.call(api, "error", JSON.stringify(errorDetailsBase));
+ }
+ (_u = api.logDebugError) === null || _u === void 0 ? void 0 : _u.call(api, errorDetailsBase, "UI__DEBUG_MODE__LLM_PROMPT__ERROR");
+ yield handleServiceError(errorDetails);
+ return;
+ }
+ })
+ });
+ //# sourceMappingURL=LLMPromptV2.js.map
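
For orientation, the child-routing rule in the node function above can be restated in isolation: when the LLM finishes with tool_calls, the node first looks for an llmPromptTool child whose toolId matches the called function name, then for an MCP tool registered in the tool map, and otherwise falls back to the llmPromptDefault child. The sketch below is illustrative only and not part of the package; resolveNextChild is a hypothetical helper, and it omits details the real code handles (CognigyScript parsing of toolId via api.parseCognigyScriptText, the retrieve_knowledge special case, and the session-state bookkeeping).

// Illustrative sketch (not part of @cognigy/rest-api-client): the routing rule of LLM_PROMPT_V2, standalone.
function resolveNextChild(llmResult, childConfigs, toolMap) {
    if (llmResult.finishReason === "tool_calls" && llmResult.toolCalls.length > 0) {
        const call = llmResult.toolCalls[0];
        // 1. An llmPromptTool child whose toolId matches the called function name
        let child = childConfigs.find(c => c.type === "llmPromptTool" && c.config && c.config.toolId === call.function.name);
        // 2. Otherwise an MCP tool child registered under that name in the tool map
        if (!child && toolMap.has(call.function.name)) {
            child = childConfigs.find(c => c.id === toolMap.get(call.function.name));
        }
        if (child) {
            return child.id;
        }
    }
    // 3. Fall back to the llmPromptDefault child (also used when there is no tool call)
    const defaultChild = childConfigs.find(c => c.type === "llmPromptDefault");
    return defaultChild ? defaultChild.id : undefined;
}

// Example: a tool call to "get_weather" routes to the matching llmPromptTool child.
const nextNodeId = resolveNextChild(
    { finishReason: "tool_calls", toolCalls: [{ function: { name: "get_weather", arguments: {} } }] },
    [
        { id: "node-default", type: "llmPromptDefault", config: {} },
        { id: "node-weather", type: "llmPromptTool", config: { toolId: "get_weather" } },
    ],
    new Map()
);
console.log(nextNodeId); // "node-weather"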