@cognigy/rest-api-client 0.16.0 → 0.18.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. package/CHANGELOG.md +12 -0
  2. package/build/GenericTusFn.js +6 -1
  3. package/build/GenericUploadFn.js +3 -5
  4. package/build/apigroups/ResourcesAPIGroup_2_0.js +12 -1
  5. package/build/connector/AxiosAdapter.js +35 -15
  6. package/build/shared/charts/createNodeDescriptor.js +5 -5
  7. package/build/shared/charts/descriptors/agentAssist/constants/constants.js +16 -1
  8. package/build/shared/charts/descriptors/agentAssist/helpers/agentAssistTranslator.helper.js +19 -0
  9. package/build/shared/charts/descriptors/agentAssist/helpers/getFontSizeFieldOptions.js +84 -0
  10. package/build/shared/charts/descriptors/agentAssist/helpers/getLanguageName.helper.js +33 -0
  11. package/build/shared/charts/descriptors/agentAssist/helpers/knowledgeSearch/answerExtraction.helper.js +59 -0
  12. package/build/shared/charts/descriptors/agentAssist/helpers/knowledgeSearch/configValidator.helper.js +20 -0
  13. package/build/shared/charts/descriptors/agentAssist/helpers/knowledgeSearch/errorHandler.helper.js +64 -0
  14. package/build/shared/charts/descriptors/agentAssist/helpers/knowledgeSearch/followUpDetection.helper.js +72 -0
  15. package/build/shared/charts/descriptors/agentAssist/helpers/knowledgeSearch/knowledgeSearch.helper.js +58 -0
  16. package/build/shared/charts/descriptors/agentAssist/helpers/sentiment.helper.js +7 -13
  17. package/build/shared/charts/descriptors/agentAssist/htmlTemplates/identityAssistTemplate.js +17 -18
  18. package/build/shared/charts/descriptors/agentAssist/htmlTemplates/knowledgeAssistTemplate.js +345 -152
  19. package/build/shared/charts/descriptors/agentAssist/htmlTemplates/nextActionWidgetTemplate.js +212 -80
  20. package/build/shared/charts/descriptors/agentAssist/htmlTemplates/sentimentAnalysisTemplate.js +11 -6
  21. package/build/shared/charts/descriptors/agentAssist/htmlTemplates/transcriptAssistTemplate.js +15 -13
  22. package/build/shared/charts/descriptors/agentAssist/identityAssist.js +87 -14
  23. package/build/shared/charts/descriptors/agentAssist/knowledgeAssist.js +192 -327
  24. package/build/shared/charts/descriptors/agentAssist/locales/cs.locale.js +11 -0
  25. package/build/shared/charts/descriptors/agentAssist/locales/de.locale.js +11 -0
  26. package/build/shared/charts/descriptors/agentAssist/locales/en.locale.js +11 -0
  27. package/build/shared/charts/descriptors/agentAssist/locales/es.locale.js +11 -0
  28. package/build/shared/charts/descriptors/agentAssist/locales/fr.locale.js +11 -0
  29. package/build/shared/charts/descriptors/agentAssist/locales/index.js +22 -0
  30. package/build/shared/charts/descriptors/agentAssist/locales/ja.locale.js +11 -0
  31. package/build/shared/charts/descriptors/agentAssist/locales/ko.locale.js +11 -0
  32. package/build/shared/charts/descriptors/agentAssist/locales/pt.locale.js +11 -0
  33. package/build/shared/charts/descriptors/agentAssist/nextActionAssist.js +485 -10
  34. package/build/shared/charts/descriptors/agentAssist/sentimentAssist.js +31 -8
  35. package/build/shared/charts/descriptors/agentAssist/setAgentAssistGrid.js +31 -16
  36. package/build/shared/charts/descriptors/agentAssist/transcriptAssist.js +40 -2
  37. package/build/shared/charts/descriptors/allFields.js +6 -0
  38. package/build/shared/charts/descriptors/analytics/requestRating.js +56 -2
  39. package/build/shared/charts/descriptors/index.js +1 -3
  40. package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +1 -1
  41. package/build/shared/charts/descriptors/logic/goTo.js +3 -3
  42. package/build/shared/charts/descriptors/message/question/optionalQuestion.js +1 -1
  43. package/build/shared/charts/descriptors/message/question/question.js +123 -2
  44. package/build/shared/charts/descriptors/message/question/utils/evaluateQuestionAnswer.js +44 -3
  45. package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +27 -7
  46. package/build/shared/charts/descriptors/nlu/index.js +1 -3
  47. package/build/shared/charts/descriptors/service/GPTPrompt.js +320 -22
  48. package/build/shared/charts/descriptors/service/LLMEntityExtract.js +274 -0
  49. package/build/shared/charts/descriptors/service/httpRequest.js +33 -1
  50. package/build/shared/charts/descriptors/service/index.js +3 -1
  51. package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +35 -25
  52. package/build/shared/charts/descriptors/voice/mappers/transfer.mapper.js +15 -11
  53. package/build/shared/charts/descriptors/voice/nodes/play.js +8 -1
  54. package/build/shared/charts/descriptors/voice/nodes/sessionSpeechParameters.js +51 -75
  55. package/build/shared/charts/descriptors/voicegateway2/nodes/play.js +0 -1
  56. package/build/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +38 -71
  57. package/build/shared/charts/descriptors/voicegateway2/nodes/transfer.js +17 -25
  58. package/build/shared/charts/helpers/generativeAI/generativeAIPrompts.js +55 -0
  59. package/build/shared/helper/BaseContext.js +31 -0
  60. package/build/shared/interfaces/appsession/IAppSession.js +3 -0
  61. package/build/shared/interfaces/appsession/ISetAppState.js +3 -0
  62. package/build/shared/interfaces/appsession/index.js +3 -0
  63. package/build/shared/interfaces/fileStorage.js +6 -0
  64. package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +33 -2
  65. package/build/shared/interfaces/handover.js +2 -1
  66. package/build/shared/interfaces/messageAPI/endpoints.js +17 -2
  67. package/build/shared/interfaces/messageAPI/handover.js +9 -5
  68. package/build/shared/interfaces/nlu/nlu.js +3 -0
  69. package/build/shared/interfaces/resources/IAuditEvent.js +10 -9
  70. package/build/shared/interfaces/resources/ILargeLanguageModel.js +34 -20
  71. package/build/shared/interfaces/resources/IMilestone.js +50 -0
  72. package/build/shared/interfaces/resources/INodeDescriptorSet.js +85 -75
  73. package/build/shared/interfaces/resources/TResourceType.js +12 -5
  74. package/build/shared/interfaces/resources/TSocketChannelType.js +5 -1
  75. package/build/shared/interfaces/resources/settings/IGenerativeAISettings.js +4 -0
  76. package/build/shared/interfaces/restAPI/metrics/callCounter/v2.0/ICallCounterAggregatedValue_2_0.js +3 -0
  77. package/build/shared/interfaces/restAPI/metrics/callCounter/v2.0/IGetCallCounterOrganisationRest_2_0.js +3 -0
  78. package/build/shared/interfaces/restAPI/metrics/callCounter/v2.0/IGetCallCounterRest_2_0.js +3 -0
  79. package/build/shared/interfaces/restAPI/metrics/callCounter/v2.0/index.js +3 -0
  80. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/ICloneMilestoneRest_2_0.js +3 -0
  81. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/ICreateMilestoneRest_2_0.js +3 -0
  82. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IDeleteMilestoneRest_2_0.js +3 -0
  83. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IIndexMilestonesRest_2_0.js +3 -0
  84. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IMilestoneIndexItem_2_0.js +3 -0
  85. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IMilestoneStepMetric_2_0.js +3 -0
  86. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IMilestoneStep_2_0.js +3 -0
  87. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IMilestone_2_0.js +3 -0
  88. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IReadMilestoneRest_2_0.js +3 -0
  89. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IUpdateMilestoneRest_2_0.js +3 -0
  90. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/index.js +3 -0
  91. package/build/shared/interfaces/restAPI/resources/uploadResumable/v2.0/IUploadResumableRest_2_0.js +3 -1
  92. package/build/shared/interfaces/security/ICallCounterAggregatedValue.js +3 -0
  93. package/build/shared/interfaces/security/IPermission.js +5 -2
  94. package/build/shared/interfaces/security/IRole.js +6 -0
  95. package/build/shared/interfaces/security/index.js +2 -1
  96. package/build/shared/interfaces/trainer/ITrainerRecord.js +2 -0
  97. package/package.json +2 -2
  98. package/types/index.d.ts +1053 -169
  99. package/build/shared/charts/descriptors/nlu/extractAnswer.js +0 -115

package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js

@@ -80,7 +80,7 @@ exports.createLastConverationString = createLastConverationString;
  }
  ]
  */
- const createLastConversationChatObject = (lastConversationEntries, systemMessage, turnLimit = 100) => {
+ const createLastConversationChatObject = (lastConversationEntries, systemMessage, turnLimit = 100, limitToOnlyUserMessages) => {
  const conversation = [];
  if (systemMessage) {
  conversation.push({
@@ -88,12 +88,31 @@ const createLastConversationChatObject = (lastConversationEntries, systemMessage
  content: systemMessage
  });
  }
- lastConversationEntries === null || lastConversationEntries === void 0 ? void 0 : lastConversationEntries.slice(0, turnLimit).reverse().map(entry => {
- conversation.push({
- role: entry.source === "user" ? "user" : "assistant",
- content: entry.text
+ if (limitToOnlyUserMessages) {
+ // limit the conversation entries to the last X messages with source user
+ lastConversationEntries === null || lastConversationEntries === void 0 ? void 0 : lastConversationEntries.filter(entry => entry.source === "user").slice(0, turnLimit).reverse().map(entry => {
+ var _a, _b;
+ if ((_b = (_a = entry === null || entry === void 0 ? void 0 : entry.text) === null || _a === void 0 ? void 0 : _a.trim()) === null || _b === void 0 ? void 0 : _b.length) {
+ conversation.push({
+ role: "user",
+ content: entry.text.trim()
+ });
+ }
+ });
+ }
+ else {
+ lastConversationEntries === null || lastConversationEntries === void 0 ? void 0 : lastConversationEntries.slice(0, turnLimit).reverse().map(entry => {
+ var _a, _b;
+ // if text exists, add to conversation
+ // necessary to prevent data only messages from being added
+ if ((_b = (_a = entry === null || entry === void 0 ? void 0 : entry.text) === null || _a === void 0 ? void 0 : _a.trim()) === null || _b === void 0 ? void 0 : _b.length) {
+ conversation.push({
+ role: entry.source === "user" ? "user" : "assistant",
+ content: entry.text.trim()
+ });
+ }
  });
- });
+ }
  return conversation;
  };
  exports.createLastConversationChatObject = createLastConversationChatObject;
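
The new limitToOnlyUserMessages parameter restricts the chat transcript to user turns only. Stripped of the down-leveled optional chaining, the branch above amounts to this plain-JS sketch (toChatMessages is a hypothetical name; the assumption that lastConversationEntries is ordered newest-first follows from the slice-then-reverse pattern):

    // Keep only user turns when requested, take the newest `turnLimit`
    // entries, restore chronological order, and drop entries without
    // text (data-only messages).
    const toChatMessages = (entries = [], onlyUser = false, turnLimit = 100) =>
        (onlyUser ? entries.filter(e => e.source === "user") : entries)
            .slice(0, turnLimit)
            .reverse()
            .filter(e => e.text?.trim()?.length)
            .map(e => ({
                role: onlyUser || e.source === "user" ? "user" : "assistant",
                content: e.text.trim()
            }));

The package builds the array in place with push; the result is the same list of { role, content } messages.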
@@ -128,7 +147,8 @@ const writeLLMDebugLogs = async (label, prompt, response, debugLogTokenCount, de
  requestTokenMessage = ` (${requestTokens} Tokens)`;
  }
  if (response) {
- const completionTokens = await api.countGPTTokens(response);
+ const message = response.result || response;
+ const completionTokens = await api.countGPTTokens(message);
  completionTokenMessage = ` (${completionTokens} Tokens)`;
  }
  }
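
The unwrap matters because, with the detailedResults option added in GPTPrompt.js below, runGenerativeAIPrompt can resolve to an object rather than a bare string; counting tokens on the wrapper would measure its JSON serialization instead of the completion. The two shapes the logger now handles (shapes inferred from this diff):

    // default: response is the completion text itself
    //   response === "The answer is 42."
    // detailedResults: response wraps the text
    //   response === { result: "The answer is 42.", usage: { /* ... */ } }
    const message = response.result || response; // always the completion text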

package/build/shared/charts/descriptors/nlu/index.js

@@ -1,12 +1,10 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.CLEAN_TEXT = exports.GENERATIVE_SLOT_FILLER_FALLBACK = exports.GENERATIVE_SLOT_FILLER_SUCCESS = exports.GENERATIVE_SLOT_FILLER = exports.FUZZY_SEARCH = exports.MATCH_PATTERN = exports.REGEX_SLOT_FILLER = exports.EXTRACT_ANSWER = exports.EXECUTE_COGNIGY_NLU = exports.ADD_LEXICON_KEYPHRASE = void 0;
+ exports.CLEAN_TEXT = exports.GENERATIVE_SLOT_FILLER_FALLBACK = exports.GENERATIVE_SLOT_FILLER_SUCCESS = exports.GENERATIVE_SLOT_FILLER = exports.FUZZY_SEARCH = exports.MATCH_PATTERN = exports.REGEX_SLOT_FILLER = exports.EXECUTE_COGNIGY_NLU = exports.ADD_LEXICON_KEYPHRASE = void 0;
  var addLexiconKeyphrase_1 = require("./addLexiconKeyphrase");
  Object.defineProperty(exports, "ADD_LEXICON_KEYPHRASE", { enumerable: true, get: function () { return addLexiconKeyphrase_1.ADD_LEXICON_KEYPHRASE; } });
  var executeCognigyNLU_1 = require("./executeCognigyNLU");
  Object.defineProperty(exports, "EXECUTE_COGNIGY_NLU", { enumerable: true, get: function () { return executeCognigyNLU_1.EXECUTE_COGNIGY_NLU; } });
- var extractAnswer_1 = require("./extractAnswer");
- Object.defineProperty(exports, "EXTRACT_ANSWER", { enumerable: true, get: function () { return extractAnswer_1.EXTRACT_ANSWER; } });
  var regexSlotFiller_1 = require("./regexSlotFiller");
  Object.defineProperty(exports, "REGEX_SLOT_FILLER", { enumerable: true, get: function () { return regexSlotFiller_1.REGEX_SLOT_FILLER; } });
  var matchPattern_1 = require("./matchPattern");

package/build/shared/charts/descriptors/service/GPTPrompt.js

@@ -3,7 +3,9 @@ Object.defineProperty(exports, "__esModule", { value: true });
  exports.GPT_PROMPT = void 0;
  /* Custom modules */
  const createNodeDescriptor_1 = require("../../createNodeDescriptor");
+ const logic_1 = require("../logic");
  const prompt_1 = require("../nlu/generativeSlotFiller/prompt");
+ const errors_1 = require("../../../errors");
  exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
  type: "completeText",
  defaultLabel: "LLM Prompt",
@@ -189,14 +191,48 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
  required: true
  },
  },
+ {
+ key: "immediateOutput",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__IMMEDIATEOUTPUT__LABEL",
+ type: "toggle",
+ defaultValue: false,
+ condition: {
+ or: [
+ {
+ key: "storeLocation",
+ value: "input",
+ },
+ {
+ key: "storeLocation",
+ value: "context",
+ }
+ ]
+ }
+ },
  {
  key: "inputKey",
  type: "cognigyText",
  label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__INPUT_KEY__LABEL",
  defaultValue: "promptResult",
  condition: {
- key: "storeLocation",
- value: "input",
+ or: [
+ {
+ key: "storeLocation",
+ value: "input",
+ },
+ {
+ and: [
+ {
+ key: "storeLocation",
+ value: "stream",
+ },
+ {
+ key: "streamStoreCopyInInput",
+ value: true,
+ }
+ ]
+ }
+ ]
  }
  },
  {
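
The inputKey condition grows from a flat key/value pair into a nested or/and tree. A minimal sketch of how such a tree could be evaluated against the node's current config values (evaluateCondition is a hypothetical helper, not part of the package):

    // Recursively evaluate a field-condition tree; supports the three
    // shapes used above: { key, value }, { or: [...] } and { and: [...] }.
    function evaluateCondition(condition, config) {
        if (!condition) return true;
        if (Array.isArray(condition.or)) return condition.or.some(c => evaluateCondition(c, config));
        if (Array.isArray(condition.and)) return condition.and.every(c => evaluateCondition(c, config));
        return config[condition.key] === condition.value;
    }

    // The inputKey field shows when storing to input, or when streaming
    // while keeping a copy in input:
    evaluateCondition(
        { or: [
            { key: "storeLocation", value: "input" },
            { and: [
                { key: "storeLocation", value: "stream" },
                { key: "streamStoreCopyInInput", value: true }
            ] }
        ] },
        { storeLocation: "stream", streamStoreCopyInInput: true }
    ); // -> true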
@@ -225,13 +261,24 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
  type: "description",
  label: " ",
  params: {
- text: "Please note streaming may not be supported by all LLM providers. If streaming is not supported, the result is written to the Input object."
+ text: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_DESCRIPTION__TEXT"
  },
  condition: {
  key: "storeLocation",
  value: "stream",
  }
  },
+ {
+ key: "streamStoreCopyInInput",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STORE_COPY__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STORE_COPY__DESCRIPTION",
+ defaultValue: false,
+ condition: {
+ key: "storeLocation",
+ value: "stream",
+ }
+ },
  {
  key: "debugLogTokenCount",
  label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUGLOGTOKENCOUNT__LABEL",
@@ -251,7 +298,145 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
  type: "description",
  label: " ",
  params: {
- text: "For performance reasons, debug logging will only be active when testing from Interaction Panel."
+ text: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUG_DESCRIPTION__TEXT"
+ }
+ },
+ {
+ key: "responseFormat",
+ type: "select",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__RESPONSE_FORMAT__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__RESPONSE_FORMAT__DESCRIPTION",
+ defaultValue: "default",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__RESPONSE_FORMAT__OPTIONS__DEFAULT__LABEL",
+ value: "default"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__RESPONSE_FORMAT__OPTIONS__TEXT__LABEL",
+ value: "text"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__RESPONSE_FORMAT__OPTIONS__JSON__LABEL",
+ value: "json_object"
+ }
+ ]
+ },
+ },
+ {
+ key: "detailedResults",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DETAILED_RESULTS__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DETAILED_RESULTS__DESCRIPTION",
+ type: "toggle",
+ defaultValue: false,
+ condition: {
+ or: [
+ {
+ key: "storeLocation",
+ value: "input",
+ },
+ {
+ key: "storeLocation",
+ value: "context",
+ },
+ {
+ and: [
+ {
+ key: "storeLocation",
+ value: "stream",
+ },
+ {
+ key: "streamStoreCopyInInput",
+ value: true,
+ }
+ ]
+ }
+ ]
+ }
+ },
+ {
+ key: "seed",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__SEED__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__SEED__DESCRIPTION",
+ type: "cognigyText",
+ defaultValue: ""
+ },
+ {
+ key: "jsonStreamWarning",
+ type: "description",
+ label: " ",
+ params: {
+ text: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__JSONSTREAMWARNING__PARAM"
+ },
+ condition: {
+ and: [
+ {
+ key: "responseFormat",
+ value: "json_object"
+ },
+ {
+ key: "storeLocation",
+ value: "stream"
+ }
+ ]
+ }
+ },
+ {
+ key: "customModelOptions",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_MODEL_OPTIONS__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_MODEL_OPTIONS__DESCRIPTION",
+ type: "json",
+ defaultValue: {}
+ },
+ {
+ key: "customRequestOptions",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_REQUEST_OPTIONS__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_REQUEST_OPTIONS__DESCRIPTION",
+ type: "json",
+ defaultValue: {}
+ },
+ {
+ key: "errorHandling",
+ type: "select",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__HANDLE_SERVICE_ERROR__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__HANDLE_SERVICE_ERROR__DESCRIPTION",
+ defaultValue: "continue",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__HANDLE_SERVICE_ERROR__OPTIONS__STOP__LABEL",
+ value: "stop"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__HANDLE_SERVICE_ERROR__OPTIONS__CONTINUE__LABEL",
+ value: "continue"
+ },
+ {
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__HANDLE_SERVICE_ERROR__OPTIONS__GOTO__LABEL",
+ value: "goto"
+ },
+ ]
+ }
+ },
+ {
+ key: "errorMessage",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__ERROR_MESSAGE__LABEL",
+ type: "cognigyText",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__ERROR_MESSAGE__DESCRIPTION",
+ defaultValue: "",
+ condition: {
+ key: "errorHandling",
+ value: "continue"
+ }
+ },
+ {
+ key: "errorHandlingGotoTarget",
+ type: "flowNode",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__ERROR__GOTO_NODE__LABEL",
+ condition: {
+ key: "errorHandling",
+ value: "goto"
  }
  },
  ],
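
Taken together, the new fields cover output shape, reproducibility, pass-through options, and failure handling. A hypothetical config object as the node function would receive it (keys from the field definitions above; values purely illustrative, and the exact pass-through handling of the custom options is not shown in this diff):

    const config = {
        responseFormat: "json_object", // "default" | "text" | "json_object"; "default" is omitted from the request
        seed: "42",                    // cognigyText, so it arrives as a string
        detailedResults: true,         // include token usage alongside the result
        customModelOptions: {},        // passed through toward the model call
        customRequestOptions: {},      // passed through toward the outgoing request
        errorHandling: "goto",         // "stop" | "continue" (default) | "goto"
        errorHandlingGotoTarget: { flow: "<flowId>", node: "<nodeId>" },
        errorMessage: ""               // only output when errorHandling === "continue"
    };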
@@ -271,6 +456,9 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
  "useStop",
  "stop",
  "timeout",
+ "responseFormat",
+ "jsonStreamWarning",
+ "seed"
  ]
  },
  {
@@ -279,10 +467,24 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
  defaultCollapsed: true,
  fields: [
  "storeLocation",
+ "jsonStreamWarning",
  "streamDescription",
  "inputKey",
  "contextKey",
- "streamStopTokens"
+ "immediateOutput",
+ "streamStopTokens",
+ "streamStoreCopyInInput",
+ "detailedResults"
+ ]
+ },
+ {
+ key: "errorHandling",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__SECTIONS__ERROR_HANDLING__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "errorHandling",
+ "errorMessage",
+ "errorHandlingGotoTarget",
  ]
  },
  {
@@ -294,6 +496,15 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
  "debugLogTokenCount",
  "debugLogRequestAndCompletion"
  ]
+ },
+ {
+ key: "customOptions",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__SECTIONS__CUSTOM_OPTIONS__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "customModelOptions",
+ "customRequestOptions"
+ ]
  }
  ],
  form: [
@@ -303,15 +514,19 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
  { type: "field", key: "chatTranscriptSteps" },
  { type: "section", key: "advanced" },
  { type: "section", key: "storage" },
- { type: "section", key: "debugging" }
+ { type: "section", key: "errorHandling" },
+ { type: "section", key: "debugging" },
+ { type: "section", key: "customOptions" }
  ],
  appearance: {},
  tags: ["service", "llm", "gpt", "generative ai", "openai", "azure", "prompt"],
- function: async ({ cognigy, config }) => {
+ function: async ({ cognigy, config, nodeId }) => {
  var _a;
  const { api, input } = cognigy;
- const { temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, streamStopTokens, debugLogTokenCount, debugLogRequestAndCompletion, llmProviderReferenceId, useChatMode, chatTranscriptSteps } = config;
+ const { temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, streamStopTokens, debugLogTokenCount, debugLogRequestAndCompletion, llmProviderReferenceId, useChatMode, chatTranscriptSteps, responseFormat, streamStoreCopyInInput, detailedResults, seed, immediateOutput, customModelOptions, customRequestOptions, errorHandling = "continue", // default behavior for LLM Prompt node was, continue its execution even though an error occurred (deviating it from the SEO node) & do not output an error message on UI explicitly. However, error is always stored in the input or context object. We can use an extra "say" node to output it.
+ errorHandlingGotoTarget, errorMessage, } = config;
  let prompt = config.prompt;
+ const { traceId } = input;
  // check if custom variables are used and if they have a length modifier
  // works only for a single variable per prompt
  if (prompt.includes("@cognigyRecentConversation")) {
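
Note the destructuring default errorHandling = "continue": it only applies when the stored value is undefined, which keeps node configs saved before this field existed on the previous behavior. Plain JS semantics:

    const { errorHandling = "continue" } = {};                            // -> "continue" (old configs)
    const { errorHandling: e2 = "continue" } = { errorHandling: "stop" }; // -> "stop"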
@@ -332,6 +547,60 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
  const recentUserInputs = (0, prompt_1.createLastUserInputString)(cognigy.lastConversationEntries, turnLimit) + "\n";
  prompt = prompt.replace(/@cognigyRecentUserInputs(:\d+)?/, recentUserInputs);
  }
+ // handle errors from external services, depending on the settings
+ const handleServiceError = async (error) => {
+ var _a;
+ const compactError = {
+ name: error === null || error === void 0 ? void 0 : error.name,
+ code: error === null || error === void 0 ? void 0 : error.code,
+ message: (error === null || error === void 0 ? void 0 : error.message) || error
+ };
+ if ((_a = error === null || error === void 0 ? void 0 : error.originalErrorDetails) === null || _a === void 0 ? void 0 : _a.code) {
+ compactError.code = error.originalErrorDetails.code;
+ }
+ const errorResponse = {
+ error: compactError,
+ };
+ // add error to context or input
+ switch (storeLocation) {
+ case "context":
+ // @ts-ignore
+ api.addToContext(contextKey, errorResponse, "simple");
+ break;
+ default:
+ api.addToInput(inputKey, errorResponse);
+ }
+ if (errorHandling === "continue") {
+ // output the timeout message
+ if (errorMessage) {
+ api.output(errorMessage, null);
+ }
+ }
+ else if (errorHandling === "goto") {
+ if (!errorHandlingGotoTarget) {
+ throw new Error("GoTo Target is required");
+ }
+ const gotoParams = {
+ cognigy,
+ childConfigs: [],
+ nodeId,
+ config: {
+ flowNode: {
+ flow: errorHandlingGotoTarget.flow,
+ node: errorHandlingGotoTarget.node,
+ },
+ injectedText: undefined,
+ injectedData: undefined,
+ executionMode: "continue",
+ absorbContext: false
+ }
+ };
+ await logic_1.GO_TO.function(gotoParams);
+ }
+ else {
+ throw new errors_1.InternalServerError(error === null || error === void 0 ? void 0 : error.message, { traceId });
+ }
+ };
  try {
  const data = {
  prompt,
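
handleServiceError first normalizes whatever the provider threw into a compact record and stores it, then branches on the errorHandling setting: "continue" optionally speaks errorMessage, "goto" delegates to the GO_TO node's function, and anything else rethrows as an InternalServerError. A hypothetical stored value for a provider timeout (field names from the code above; values illustrative):

    // written to input[inputKey], or context[contextKey] when storing to context
    const errorResponse = {
        error: {
            name: "TimeoutError",                     // error.name
            code: 408,                                // error.code, overridden by originalErrorDetails.code when present
            message: "LLM provider request timed out" // error.message, or the raw error when no message exists
        }
    };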
@@ -344,19 +613,27 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
  useCase: "promptNode",
  stream: storeLocation === "stream",
  streamOnDataHandler: (text) => {
- text = text.trim();
+ text = text && text.trim();
  if (text) {
  api.output(text, null);
  }
  },
- streamStopTokens
+ streamStopTokens,
+ detailedResults,
+ seed: Number(seed) ? Number(seed) : undefined,
+ customModelOptions,
+ customRequestOptions
  };
  if (useStop) {
  data["stop"] = stop;
  }
+ // llmProviderReferenceId `default` value is not a responseFormat, rather it is LLM Model default selection.
  if (llmProviderReferenceId && llmProviderReferenceId !== "default") {
  data["llmProviderReferenceId"] = llmProviderReferenceId;
  }
+ if (responseFormat && responseFormat !== "default") {
+ data["responseFormat"] = responseFormat;
+ }
  let debugPrompt = prompt;
  if (useChatMode && (prompt || chatTranscriptSteps)) {
  data["chat"] = (0, prompt_1.createLastConversationChatObject)(cognigy.lastConversationEntries, prompt, chatTranscriptSteps);
@@ -366,28 +643,49 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
  const response = await api.runGenerativeAIPrompt(data, "gptPromptNode");
  // if we're in adminconsole, process debugging options
  input.channel === "adminconsole" && (0, prompt_1.writeLLMDebugLogs)("LLM Prompt", debugPrompt, response, debugLogTokenCount, debugLogRequestAndCompletion, cognigy);
+ if (detailedResults) {
+ // if the api didn't return native usage results, compute them
+ if (!response.usage) {
+ const promptContent = (response.messages) ? JSON.stringify(response.messages) : prompt;
+ delete response.messages;
+ const prompt_tokens = await api.countGPTTokens(promptContent);
+ const completion_tokens = await api.countGPTTokens(response.result);
+ response.usage = {
+ prompt_tokens,
+ completion_tokens,
+ total_tokens: prompt_tokens + completion_tokens,
+ calculation_method: "estimate"
+ };
+ }
+ }
  if (storeLocation === "context") {
  api.addToContext(contextKey, response, "simple");
+ // output result immediately if toggle is set
+ if (immediateOutput) {
+ const resultToOutput = typeof ((response === null || response === void 0 ? void 0 : response.result) || response) === "object" ? JSON.stringify((response === null || response === void 0 ? void 0 : response.result) || response, undefined, 2) : (response === null || response === void 0 ? void 0 : response.result) || response;
+ api.output(resultToOutput, null);
+ }
  }
- else if (storeLocation === "input") {
+ else if (storeLocation === "input" || (storeLocation === "stream" && streamStoreCopyInInput)) {
  // @ts-ignore
  api.addToInput(inputKey, response);
+ // output result immediately if toggle is set and we're storing into input
+ // this means we don't output the result again if we streamed
+ if (storeLocation === "input" && immediateOutput) {
+ const resultToOutput = typeof ((response === null || response === void 0 ? void 0 : response.result) || response) === "object" ? JSON.stringify((response === null || response === void 0 ? void 0 : response.result) || response, undefined, 2) : (response === null || response === void 0 ? void 0 : response.result) || response;
+ api.output(resultToOutput, null);
+ }
  }
  }
  catch (error) {
  const errorDetails = {
- name: error.name,
- code: error.code,
- message: error.message || ((_a = error.originalErrorDetails) === null || _a === void 0 ? void 0 : _a.message),
+ name: error === null || error === void 0 ? void 0 : error.name,
+ code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
+ message: (error === null || error === void 0 ? void 0 : error.message) || ((_a = error.originalErrorDetails) === null || _a === void 0 ? void 0 : _a.message),
+ originalErrorDetails: error === null || error === void 0 ? void 0 : error.originalErrorDetails
  };
- if (storeLocation === "context") {
- api.addToContext(contextKey, { error: errorDetails }, "simple");
- }
- else {
- // @ts-ignore
- api.addToInput(inputKey, { error: errorDetails });
- }
- throw error;
+ await handleServiceError(errorDetails);
+ return;
  }
  }
  });
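
The repeated immediateOutput ternary above reduces to a small helper; as a plain-JS sketch (outputResult is a hypothetical name):

    // Output the result (or the whole response) right away,
    // pretty-printing objects such as detailedResults payloads.
    function outputResult(api, response) {
        const value = (response && response.result) || response;
        api.output(typeof value === "object" ? JSON.stringify(value, undefined, 2) : value, null);
    }

When detailedResults is set and the provider reports no native usage, the node estimates it with countGPTTokens over the serialized chat messages (or the raw prompt) and the completion, and marks the numbers with calculation_method: "estimate" so consumers can tell them apart from provider-reported counts.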