@cognigy/rest-api-client 0.17.0 → 0.19.0

This diff shows the content of publicly released versions of this package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (191)
  1. package/CHANGELOG.md +12 -0
  2. package/build/GenericTusFn.js +6 -1
  3. package/build/GenericUploadFn.js +3 -5
  4. package/build/apigroups/AdministrationAPIGroup_2_0.js +3 -1
  5. package/build/apigroups/MetricsAPIGroup_2_0.js +5 -0
  6. package/build/apigroups/ResourcesAPIGroup_2_0.js +21 -7
  7. package/build/connector/AxiosAdapter.js +35 -15
  8. package/build/shared/charts/createNodeDescriptor.js +5 -5
  9. package/build/shared/charts/descriptors/agentAssist/constants/constants.js +16 -1
  10. package/build/shared/charts/descriptors/agentAssist/helpers/agentAssistTranslator.helper.js +19 -0
  11. package/build/shared/charts/descriptors/agentAssist/helpers/determineMetadata.js +15 -0
  12. package/build/shared/charts/descriptors/agentAssist/helpers/getFontSizeFieldOptions.js +84 -0
  13. package/build/shared/charts/descriptors/agentAssist/helpers/getLanguageName.helper.js +33 -0
  14. package/build/shared/charts/descriptors/agentAssist/helpers/knowledgeSearch/answerExtraction.helper.js +59 -0
  15. package/build/shared/charts/descriptors/agentAssist/helpers/knowledgeSearch/configValidator.helper.js +20 -0
  16. package/build/shared/charts/descriptors/agentAssist/helpers/knowledgeSearch/errorHandler.helper.js +64 -0
  17. package/build/shared/charts/descriptors/agentAssist/helpers/knowledgeSearch/followUpDetection.helper.js +72 -0
  18. package/build/shared/charts/descriptors/agentAssist/helpers/knowledgeSearch/knowledgeSearch.helper.js +58 -0
  19. package/build/shared/charts/descriptors/agentAssist/helpers/sentiment.helper.js +7 -13
  20. package/build/shared/charts/descriptors/agentAssist/htmlTemplates/identityAssistTemplate.js +17 -18
  21. package/build/shared/charts/descriptors/agentAssist/htmlTemplates/knowledgeAssistTemplate.js +330 -153
  22. package/build/shared/charts/descriptors/agentAssist/htmlTemplates/nextActionWidgetTemplate.js +212 -80
  23. package/build/shared/charts/descriptors/agentAssist/htmlTemplates/sentimentAnalysisTemplate.js +11 -6
  24. package/build/shared/charts/descriptors/agentAssist/htmlTemplates/transcriptAssistTemplate.js +15 -13
  25. package/build/shared/charts/descriptors/agentAssist/identityAssist.js +88 -15
  26. package/build/shared/charts/descriptors/agentAssist/knowledgeAssist.js +192 -327
  27. package/build/shared/charts/descriptors/agentAssist/locales/cs.locale.js +11 -0
  28. package/build/shared/charts/descriptors/agentAssist/locales/de.locale.js +11 -0
  29. package/build/shared/charts/descriptors/agentAssist/locales/en.locale.js +11 -0
  30. package/build/shared/charts/descriptors/agentAssist/locales/es.locale.js +11 -0
  31. package/build/shared/charts/descriptors/agentAssist/locales/fr.locale.js +11 -0
  32. package/build/shared/charts/descriptors/agentAssist/locales/index.js +22 -0
  33. package/build/shared/charts/descriptors/agentAssist/locales/ja.locale.js +11 -0
  34. package/build/shared/charts/descriptors/agentAssist/locales/ko.locale.js +11 -0
  35. package/build/shared/charts/descriptors/agentAssist/locales/pt.locale.js +11 -0
  36. package/build/shared/charts/descriptors/agentAssist/nextActionAssist.js +484 -10
  37. package/build/shared/charts/descriptors/agentAssist/sentimentAssist.js +32 -9
  38. package/build/shared/charts/descriptors/agentAssist/setAdaptiveCardTile.js +2 -0
  39. package/build/shared/charts/descriptors/agentAssist/setAgentAssistGrid.js +2 -1
  40. package/build/shared/charts/descriptors/agentAssist/setHtmlTile.js +5 -3
  41. package/build/shared/charts/descriptors/agentAssist/setIframeTile.js +5 -3
  42. package/build/shared/charts/descriptors/agentAssist/setSecureFormsTile.js +2 -2
  43. package/build/shared/charts/descriptors/agentAssist/transcriptAssist.js +42 -3
  44. package/build/shared/charts/descriptors/analytics/activateProfile.js +1 -0
  45. package/build/shared/charts/descriptors/analytics/blindMode.js +2 -0
  46. package/build/shared/charts/descriptors/analytics/completeGoal.js +1 -0
  47. package/build/shared/charts/descriptors/analytics/deactivateProfile.js +1 -0
  48. package/build/shared/charts/descriptors/analytics/deleteProfile.js +1 -0
  49. package/build/shared/charts/descriptors/analytics/index.js +3 -1
  50. package/build/shared/charts/descriptors/analytics/mergeProfile.js +1 -0
  51. package/build/shared/charts/descriptors/analytics/overwriteAnalytics.js +9 -0
  52. package/build/shared/charts/descriptors/analytics/requestRating.js +56 -2
  53. package/build/shared/charts/descriptors/analytics/setRating.js +4 -2
  54. package/build/shared/charts/descriptors/analytics/trackMilestone.js +95 -0
  55. package/build/shared/charts/descriptors/analytics/updateProfile.js +1 -0
  56. package/build/shared/charts/descriptors/apps/initAppSession.js +1 -0
  57. package/build/shared/charts/descriptors/apps/setAdaptiveCardAppState.js +35 -10
  58. package/build/shared/charts/descriptors/apps/setHtmlAppState.js +25 -2
  59. package/build/shared/charts/descriptors/apps/utils/getXAppsOverlaySettings.js +54 -0
  60. package/build/shared/charts/descriptors/connectionNodes/documentParserProviders/azureAIDocumentIntelligenceConnection.js +12 -0
  61. package/build/shared/charts/descriptors/connectionNodes/documentParserProviders/index.js +13 -0
  62. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/awsBedrockProviderConnection.js +12 -0
  63. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/azureOpenAIProviderConnection.js +4 -3
  64. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/azureOpenAIProviderConnectionV2.js +3 -3
  65. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/azureOpenAIProviderOauth2Connection.js +14 -0
  66. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/index.js +16 -8
  67. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/openAIProviderConnection.js +3 -3
  68. package/build/shared/charts/descriptors/data/addToContext.js +7 -0
  69. package/build/shared/charts/descriptors/data/copyDataToContext.js +3 -0
  70. package/build/shared/charts/descriptors/data/copySlotsToContext.js +3 -0
  71. package/build/shared/charts/descriptors/data/debugMessage.js +73 -0
  72. package/build/shared/charts/descriptors/data/index.js +3 -1
  73. package/build/shared/charts/descriptors/data/removeFromContext.js +9 -1
  74. package/build/shared/charts/descriptors/data/resetContext.js +1 -0
  75. package/build/shared/charts/descriptors/index.js +8 -4
  76. package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +36 -11
  77. package/build/shared/charts/descriptors/logic/resetState.js +1 -0
  78. package/build/shared/charts/descriptors/logic/setState.js +2 -1
  79. package/build/shared/charts/descriptors/logic/setTranslation.js +3 -1
  80. package/build/shared/charts/descriptors/logic/switchLocale.js +1 -0
  81. package/build/shared/charts/descriptors/logic/think.js +3 -1
  82. package/build/shared/charts/descriptors/logic/thinkV2.js +2 -0
  83. package/build/shared/charts/descriptors/message/question/optionalQuestion.js +1 -1
  84. package/build/shared/charts/descriptors/message/question/question.js +173 -7
  85. package/build/shared/charts/descriptors/message/question/utils/evaluateQuestionAnswer.js +44 -3
  86. package/build/shared/charts/descriptors/message/question/utils/validateQuestionAnswer.js +4 -2
  87. package/build/shared/charts/descriptors/nlu/cleanText.js +1 -0
  88. package/build/shared/charts/descriptors/nlu/fuzzySearch.js +23 -1
  89. package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +45 -19
  90. package/build/shared/charts/descriptors/nlu/index.js +1 -3
  91. package/build/shared/charts/descriptors/service/GPTPrompt.js +362 -29
  92. package/build/shared/charts/descriptors/service/LLMEntityExtract.js +283 -0
  93. package/build/shared/charts/descriptors/service/handoverV2.js +84 -1
  94. package/build/shared/charts/descriptors/service/httpRequest.js +68 -3
  95. package/build/shared/charts/descriptors/service/index.js +3 -1
  96. package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +110 -27
  97. package/build/shared/charts/descriptors/voice/mappers/transfer.mapper.js +4 -4
  98. package/build/shared/charts/descriptors/voice/nodes/bargeIn.js +2 -0
  99. package/build/shared/charts/descriptors/voice/nodes/continuousAsr.js +2 -0
  100. package/build/shared/charts/descriptors/voice/nodes/dtmf.js +2 -0
  101. package/build/shared/charts/descriptors/voice/nodes/muteSpeechInput.js +1 -0
  102. package/build/shared/charts/descriptors/voice/nodes/noUserInput.js +2 -0
  103. package/build/shared/charts/descriptors/voice/nodes/play.js +8 -1
  104. package/build/shared/charts/descriptors/voice/nodes/sessionSpeechParameters.js +46 -45
  105. package/build/shared/charts/descriptors/voice/nodes/transfer.js +2 -0
  106. package/build/shared/charts/descriptors/voicegateway2/nodes/dtmf.js +2 -0
  107. package/build/shared/charts/descriptors/voicegateway2/nodes/hangup.js +2 -0
  108. package/build/shared/charts/descriptors/voicegateway2/nodes/muteSpeechInput.js +3 -0
  109. package/build/shared/charts/descriptors/voicegateway2/nodes/play.js +1 -1
  110. package/build/shared/charts/descriptors/voicegateway2/nodes/record.js +1 -0
  111. package/build/shared/charts/descriptors/voicegateway2/nodes/refer.js +1 -0
  112. package/build/shared/charts/descriptors/voicegateway2/nodes/sendMetadata.js +1 -0
  113. package/build/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +329 -47
  114. package/build/shared/charts/descriptors/voicegateway2/nodes/transfer.js +27 -31
  115. package/build/shared/charts/descriptors/voicegateway2/utils/helper.js +2 -2
  116. package/build/shared/charts/helpers/generativeAI/generativeAIPrompts.js +55 -0
  117. package/build/shared/charts/helpers/generativeAI/rephraseSentenceWithAi.js +4 -2
  118. package/build/shared/constants.js +10 -1
  119. package/build/shared/handoverClients/interfaces/THandoverEventType.js +1 -0
  120. package/build/shared/helper/logFullConfigToDebugMode.js +30 -0
  121. package/build/shared/helper/nlu/textCleaner.js +3 -1
  122. package/build/shared/interfaces/IOrganisation.js +1 -0
  123. package/build/shared/interfaces/IProfile.js +1 -0
  124. package/build/shared/interfaces/IProfileSchema.js +3 -0
  125. package/build/shared/interfaces/analytics/IAnalyticsSourceData.js +20 -20
  126. package/build/shared/interfaces/{restAPI/resources/journey/v2.0/IJourneyProgress_2_0.js → analytics/IMilestoneAnalytics.js} +1 -1
  127. package/build/shared/interfaces/{restAPI/resources/journey/v2.0/IJourney_2_0.js → appsession/IAppSession.js} +1 -1
  128. package/build/shared/interfaces/{restAPI/resources/journey/v2.0/IJourneyStep_2_0.js → appsession/ISetAppState.js} +1 -1
  129. package/build/shared/interfaces/appsession/ISetAppStateOptions.js +3 -0
  130. package/build/shared/interfaces/appsession/ISetAppStateOverlaySettings.js +3 -0
  131. package/build/shared/interfaces/appsession/ISetAppStateOverlaySettingsMetaData.js +3 -0
  132. package/build/shared/interfaces/debugEvents/TDebugEventMessagePayload.js +3 -0
  133. package/build/shared/interfaces/debugEvents/TDebugEventType.js +2 -0
  134. package/build/shared/interfaces/fileStorage.js +6 -0
  135. package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +45 -52
  136. package/build/shared/interfaces/handover.js +44 -2
  137. package/build/shared/interfaces/license.js +3 -2
  138. package/build/shared/interfaces/license.js.map +1 -1
  139. package/build/shared/interfaces/messageAPI/endpoints.js +17 -2
  140. package/build/shared/interfaces/messageAPI/handover.js +25 -2
  141. package/build/shared/interfaces/nlu/nlu.js +3 -0
  142. package/build/shared/interfaces/resources/IAuditEvent.js +10 -9
  143. package/build/shared/interfaces/resources/IConnection.js +1 -0
  144. package/build/shared/interfaces/resources/IEndpoint.js +1 -2
  145. package/build/shared/interfaces/resources/ILargeLanguageModel.js +56 -21
  146. package/build/shared/interfaces/resources/IMilestone.js +50 -0
  147. package/build/shared/interfaces/resources/INodeDescriptorSet.js +96 -75
  148. package/build/shared/interfaces/resources/TResourceType.js +12 -5
  149. package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeSource.js +1 -1
  150. package/build/shared/interfaces/resources/settings/IAgentSettings.js +12 -7
  151. package/build/shared/interfaces/resources/settings/IGenerativeAISettings.js +8 -0
  152. package/build/shared/interfaces/resources/settings/IKnowledgeAISettings.js +18 -0
  153. package/build/shared/interfaces/resources/settings/index.js +4 -1
  154. package/build/shared/interfaces/restAPI/administration/organisations/v2.0/IReadCollectionsToBeDeletedRest_2_0.js +0 -1
  155. package/build/shared/interfaces/restAPI/administration/organisations/v2.0/IReadOrganisationKnowledgeChunksCountRest_2_0.js +3 -0
  156. package/build/shared/interfaces/restAPI/operations/index.js +3 -0
  157. package/build/shared/interfaces/restAPI/operations/nlu/v2.0/IGenerateNluScoresRest_2_0.js +57 -0
  158. package/build/shared/interfaces/restAPI/operations/nlu/v2.0/index.js +3 -0
  159. package/build/shared/interfaces/restAPI/resources/largeLanguageModel/v2.0/IAvailableModelsForLLMProvider_2_0 .js +18 -0
  160. package/build/shared/interfaces/restAPI/resources/largeLanguageModel/v2.0/IGetAvailableModelsForLLMRest_2_0 .js +3 -0
  161. package/build/shared/interfaces/restAPI/resources/{journey/v2.0/IJourneyIndexItem_2_0.js → milestone/v2.0/ICloneMilestoneRest_2_0.js} +1 -1
  162. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/ICreateMilestoneRest_2_0.js +3 -0
  163. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IDeleteMilestoneRest_2_0.js +3 -0
  164. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IIndexMilestonesRest_2_0.js +3 -0
  165. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IMilestoneIndexItem_2_0.js +3 -0
  166. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IMilestoneStepMetric_2_0.js +3 -0
  167. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IMilestoneStep_2_0.js +3 -0
  168. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IMilestone_2_0.js +3 -0
  169. package/build/shared/interfaces/restAPI/resources/{journey/v2.0/IIndexJourneysRest_2_0.js → milestone/v2.0/IReadMilestoneRest_2_0.js} +1 -1
  170. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IUpdateMilestoneRest_2_0.js +3 -0
  171. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/index.js +3 -0
  172. package/build/shared/interfaces/restAPI/resources/uploadResumable/v2.0/IUploadResumableRest_2_0.js +3 -1
  173. package/build/shared/interfaces/security/IPermission.js +4 -2
  174. package/build/shared/interfaces/security/IRole.js +2 -0
  175. package/build/shared/interfaces/security/ISystemCapabilities.js +3 -0
  176. package/build/shared/interfaces/security/index.js +1 -1
  177. package/build/shared/interfaces/trainer/ITrainerRecord.js +2 -2
  178. package/build/shared/interfaces/user.js +1 -1
  179. package/package.json +2 -2
  180. package/types/index.d.ts +2194 -957
  181. package/build/shared/charts/descriptors/nlu/extractAnswer.js +0 -115
  182. package/build/shared/interfaces/journeys/IJourney.js +0 -83
  183. package/build/shared/interfaces/journeys/IJourneyProgress.js +0 -40
  184. package/build/shared/interfaces/journeys/IJourneyTrackEvent.js +0 -35
  185. package/build/shared/interfaces/journeys/index.js +0 -14
  186. package/build/shared/interfaces/restAPI/resources/journey/v2.0/IJourneyTrackEvents_2_0.js +0 -3
  187. package/build/shared/interfaces/restAPI/resources/journey/v2.0/IReadJourneyProgressRest_2_0.js +0 -3
  188. package/build/shared/interfaces/restAPI/resources/journey/v2.0/IReadJourneyRest_2_0.js +0 -3
  189. package/build/shared/interfaces/restAPI/resources/journey/v2.0/ITrackJourneyEventRest_2_0.js +0 -3
  190. package/build/shared/interfaces/restAPI/resources/journey/v2.0/IUpdateSelectedJourneyRest_2_0.js +0 -3
  191. /package/build/shared/interfaces/{restAPI/resources/journey/v2.0 → appsession}/index.js +0 -0
@@ -196,6 +196,25 @@ exports.SEARCH_EXTRACT_OUTPUT = (0, createNodeDescriptor_1.createNodeDescriptor)
  tagLimit: 5
  }
  },
+ {
+ key: "searchSourceTagsFilterOp",
+ type: "select",
+ label: "UI__NODE_EDITOR__KNOWLEDGE_SEARCH__SOURCE_TAGS_FILTER_OP__LABEL",
+ description: "UI__NODE_EDITOR__SEARCH_EXTRACT_OUTPUT__FIELDS__SOURCE_TAGS_FILTER_OP__DESCRIPTION",
+ defaultValue: "and",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__SEARCH_EXTRACT_OUTPUT__FIELDS__SOURCE_TAGS_FILTER_OP__OPTIONS__AND__LABEL",
+ value: "and"
+ },
+ {
+ label: "UI__NODE_EDITOR__SEARCH_EXTRACT_OUTPUT__FIELDS__SOURCE_TAGS_FILTER_OP__OPTIONS__OR__LABEL",
+ value: "or"
+ },
+ ]
+ }
+ },
  {
  key: "prompt",
  label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__PROMPT__LABEL",
@@ -451,7 +470,7 @@ exports.SEARCH_EXTRACT_OUTPUT = (0, createNodeDescriptor_1.createNodeDescriptor)
  key: "searchSettings",
  label: "Search Settings",
  defaultCollapsed: true,
- fields: ["followUpDetection", "followUpDetectionSteps", "searchString", "searchStringDescription", "topK", "searchStoreLocation", "searchStoreWarning", "searchStoreLocationInputKey", "searchStoreLocationContextKey", "searchSourceTags"]
+ fields: ["followUpDetection", "followUpDetectionSteps", "searchString", "searchStringDescription", "topK", "searchStoreLocation", "searchStoreWarning", "searchStoreLocationInputKey", "searchStoreLocationContextKey", "searchSourceTags", "searchSourceTagsFilterOp"]
  },
  {
  key: "extractSettings",
@@ -532,7 +551,7 @@ exports.SEARCH_EXTRACT_OUTPUT = (0, createNodeDescriptor_1.createNodeDescriptor)
  var _a, _b, _c, _d, _e, _f, _g, _h, _j;
  const { cognigy, config, nodeId } = knowledgeSearchParams;
  const { input, api } = cognigy;
- const { topK, searchString, searchStoreLocation, searchStoreLocationContextKey, searchStoreLocationInputKey, searchSourceTags, temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, timeoutMessage, outputFallback, outputMode, mode, errorHandling, errorHandlingGotoTarget, streamStopTokens, followUpDetection, debugLogTokenCount, debugLogRequestAndCompletion } = config;
+ const { topK, searchString, searchStoreLocation, searchStoreLocationContextKey, searchStoreLocationInputKey, searchSourceTags, searchSourceTagsFilterOp, temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, timeoutMessage, outputFallback, outputMode, mode, errorHandling, errorHandlingGotoTarget, streamStopTokens, followUpDetection, debugLogTokenCount, debugLogRequestAndCompletion } = config;
  let { followUpDetectionSteps } = config;
  let { prompt } = config;
  // timeout message name not changed because of legacy compatibility
@@ -554,6 +573,7 @@ exports.SEARCH_EXTRACT_OUTPUT = (0, createNodeDescriptor_1.createNodeDescriptor)
  }
  let actualSearchString = searchString;
  // check if follow up detection is active and if yes, handle accordingly
+ // this is "context aware search"
  if (followUpDetection === "transcript") {
  let prompt;
  let lastRoundTrip;
@@ -574,9 +594,9 @@ Does the last USER input refer to the conversation before?
  Answer with "true" or "false". Answer:`;
  let promptResponse;
  try {
- promptResponse = await api.runGenerativeAIPrompt({ prompt }, "gptPromptNode");
+ promptResponse = await api.runGenerativeAIPrompt({ prompt }, "answerExtraction");
  // if we're in adminconsole, process debugging options
- input.channel === "adminconsole" && (0, prompt_1.writeLLMDebugLogs)("Search Extract Output Follow Up Detection", prompt, promptResponse, debugLogTokenCount, false, cognigy);
+ (0, prompt_1.writeLLMDebugLogs)("Search Extract Output Follow Up Detection", prompt, promptResponse, debugLogTokenCount, false, cognigy);
  // check if LLM thinks the input was a follow up
  if (promptResponse === null || promptResponse === void 0 ? void 0 : promptResponse.toLowerCase().includes("true")) {
  prompt = `You are tasked to rewrite a question based on a context, so that the question is clearer.
@@ -593,11 +613,12 @@ Context:
  ${lastRoundTrip}
  Question: ${searchString}
  New: `;
- promptResponse = await api.runGenerativeAIPrompt({ prompt }, "gptPromptNode");
+ promptResponse = await api.runGenerativeAIPrompt({ prompt }, "answerExtraction");
  // if we're in adminconsole, process debugging options
- input.channel === "adminconsole" && (0, prompt_1.writeLLMDebugLogs)("Search Extract Output Follow Up Detection 2", prompt, promptResponse, debugLogTokenCount, false, cognigy);
+ (0, prompt_1.writeLLMDebugLogs)("Search Extract Output Follow Up Detection 2", prompt, promptResponse, debugLogTokenCount, false, cognigy);
  // the actual search string to now use is the rewritten question
  actualSearchString = promptResponse;
+ api.logDebugMessage(`UI__DEBUG_MODE__SEO__MESSAGE '${actualSearchString}'`);
  }
  }
  catch (err) {
@@ -612,6 +633,7 @@ New: `;
  const compactError = {
  message: (error === null || error === void 0 ? void 0 : error.message) || error,
  };
+ api.logDebugError(JSON.stringify(compactError, undefined, 2), "Search Extract Output: Error");
  if ((_a = error === null || error === void 0 ? void 0 : error.originalErrorDetails) === null || _a === void 0 ? void 0 : _a.code) {
  compactError["code"] = error.originalErrorDetails.code;
  }
@@ -673,7 +695,10 @@ New: `;
  searchSourceTags.forEach((tag, index) => {
  searchSourceTags[index] = tag.toString();
  });
- data.tags = searchSourceTags;
+ data.tagsData = {
+ tags: searchSourceTags,
+ op: searchSourceTagsFilterOp || "and" // default to "and" if not set (backwards compatibility)
+ };
  }
  // Add knowledgeStoreIds to data
  if (knowledgeStoreId) {
@@ -682,7 +707,7 @@ New: `;
  // Perform knowledge search
  try {
  const knowledgeSearchResponse = await api.knowledgeSearch(data);
- input.channel === "adminconsole" && (0, prompt_1.writeLLMDebugLogs)("Search Extract Output Embeddings Call", data.query, undefined, debugLogTokenCount, false, cognigy);
+ (0, prompt_1.writeLLMDebugLogs)("Search Extract Output Embeddings Call", data.query, undefined, debugLogTokenCount, false, cognigy);
  // Handle possible response errors
  if ((knowledgeSearchResponse === null || knowledgeSearchResponse === void 0 ? void 0 : knowledgeSearchResponse.status) !== "success") {
  const errorMessage = (knowledgeSearchResponse === null || knowledgeSearchResponse === void 0 ? void 0 : knowledgeSearchResponse.error) || "empty";
@@ -738,7 +763,7 @@ New: `;
  frequencyPenalty,
  timeoutInMs: timeout,
  useCase: "promptNode",
- stream: outputMode === "stream",
+ stream: outputMode === "stream" && mode === "seo",
  streamOnDataHandler: (text) => {
  streamedOutput = true;
  api.output(text, null);
@@ -750,9 +775,9 @@ New: `;
  }
  let promptResponse;
  try {
- promptResponse = await api.runGenerativeAIPrompt(promptData, "gptPromptNode");
+ promptResponse = await api.runGenerativeAIPrompt(promptData, "answerExtraction");
  // if we're in adminconsole, process debugging options
- input.channel === "adminconsole" && (0, prompt_1.writeLLMDebugLogs)("Search Extract Output Main Prompt", prompt, promptResponse, debugLogTokenCount, debugLogRequestAndCompletion, cognigy);
+ (0, prompt_1.writeLLMDebugLogs)("Search Extract Output Main Prompt", prompt, promptResponse, debugLogTokenCount, debugLogRequestAndCompletion, cognigy);
  if (storeLocation === "context") {
  api.addToContext(contextKey, promptResponse, "simple");
  }
@@ -22,6 +22,7 @@ exports.RESET_STATE = (0, createNodeDescriptor_1.createNodeDescriptor)({
  const { api, input } = cognigy;
  const state = await api.resetState();
  input.state = state;
+ api.logDebugMessage(`UI__DEBUG_MODE__RESET_STATE__MESSAGE`);
  }
  });
  //# sourceMappingURL=resetState.js.map
@@ -30,7 +30,7 @@ exports.SET_STATE = (0, createNodeDescriptor_1.createNodeDescriptor)({
  {
  key: "data",
  type: "json",
- label: "UI__NODE_EDITOR__SET_STATE__DATA__LABEL "
+ label: "UI__NODE_EDITOR__SET_STATE__DATA__LABEL"
  }
  ],
  preview: {
@@ -43,6 +43,7 @@ exports.SET_STATE = (0, createNodeDescriptor_1.createNodeDescriptor)({
  const { input, api } = cognigy;
  api.setState(state);
  input.state = state;
+ api.logDebugMessage(`UI__DEBUG_MODE__SET_STATE__MESSAGE ${state}`);
  if (text || data) {
  api.think(text, data);
  }
@@ -3,6 +3,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
  exports.SET_TRANSLATION = void 0;
  /** Custom Modules */
  const createNodeDescriptor_1 = require("../../createNodeDescriptor");
+ const logFullConfigToDebugMode_1 = require("../../../helper/logFullConfigToDebugMode");
  exports.SET_TRANSLATION = (0, createNodeDescriptor_1.createNodeDescriptor)({
  type: "setTranslation",
  defaultLabel: "Set Translation",
@@ -72,7 +73,7 @@ exports.SET_TRANSLATION = (0, createNodeDescriptor_1.createNodeDescriptor)({
  { type: "field", key: "alwaysRemoveNoTranslateMarker" },
  ],
  function: async ({ cognigy, config }) => {
- const { api } = cognigy;
+ const { api, input } = cognigy;
  const { translationEnabled, flowLanguage, inputLanguage, padPayloads, alwaysRemoveNoTranslateMarker, noTranslateMarker, setInputLanguageOnExecutionCount } = config;
  api.setTranslationSettings({
  translationEnabled,
@@ -83,6 +84,7 @@ exports.SET_TRANSLATION = (0, createNodeDescriptor_1.createNodeDescriptor)({
  noTranslateMarker,
  setInputLanguageOnExecutionCount
  });
+ (0, logFullConfigToDebugMode_1.logFullConfigToDebugMode)(cognigy, config);
  }
  });
  //# sourceMappingURL=setTranslation.js.map
@@ -59,6 +59,7 @@ exports.SWITCH_LOCALE = (0, createNodeDescriptor_1.createNodeDescriptor)({
  else {
  await api.setLocaleReferenceId(localeReferenceId);
  }
+ api.logDebugMessage(`UI__DEBUG_MODE__SWITCH_LOCALE__MESSAGE`);
  }
  });
  //# sourceMappingURL=switchLocale.js.map
@@ -3,6 +3,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
  exports.THINK = void 0;
  /* Custom modules */
  const createNodeDescriptor_1 = require("../../createNodeDescriptor");
+ const logFullConfigToDebugMode_1 = require("../../../helper/logFullConfigToDebugMode");
  /**
  * Node name: 'think'
  *
@@ -93,10 +94,11 @@ exports.THINK = (0, createNodeDescriptor_1.createNodeDescriptor)({
  key: "text",
  type: "text",
  },
- tags: ["basic", "logic", "recursion", "inject"],
+ tags: ["logic", "recursion", "inject"],
  function: async ({ cognigy, config }) => {
  const { text, data, intent, thinkType, } = config;
  const { api } = cognigy;
+ (0, logFullConfigToDebugMode_1.logFullConfigToDebugMode)(cognigy, config);
  if (thinkType === "intent") {
  api.think(`cIntent:${intent}`, null);
  }
@@ -3,6 +3,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
  exports.THINK_V2 = void 0;
  /* Custom modules */
  const createNodeDescriptor_1 = require("../../createNodeDescriptor");
+ const logFullConfigToDebugMode_1 = require("../../../helper/logFullConfigToDebugMode");
  /**
  * Node name: 'think'
  *
@@ -93,6 +94,7 @@ exports.THINK_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
  function: async ({ cognigy, config }) => {
  const { text, data, intent, thinkType, } = config;
  const { api } = cognigy;
+ (0, logFullConfigToDebugMode_1.logFullConfigToDebugMode)(cognigy, config);
  if (thinkType === "intent") {
  api.thinkV2(`cIntent:${intent}`, null);
  }
@@ -346,7 +346,7 @@ exports.OPTIONAL_QUESTION = (0, createNodeDescriptor_1.createNodeDescriptor)({
  }
  return;
  }
- const result = (0, evaluateQuestionAnswer_1.evaluateQuestionAnswer)({ cognigy, config });
+ const result = await (0, evaluateQuestionAnswer_1.evaluateQuestionAnswer)({ cognigy, config });
  if (storeResultInContext && contextKey) {
  // check whether contextKey is a deep query and set
  // context storage location accordingly
@@ -78,6 +78,7 @@ var QuestionTypes;
  QuestionTypes[QuestionTypes["ipv4"] = 20] = "ipv4";
  QuestionTypes[QuestionTypes["creditcard"] = 21] = "creditcard";
  QuestionTypes[QuestionTypes["phonenumber"] = 22] = "phonenumber";
+ QuestionTypes[QuestionTypes["llm_entity"] = 23] = "llm_entity";
  })(QuestionTypes = exports.QuestionTypes || (exports.QuestionTypes = {}));
  //#endregion Interfaces
  exports.QUESTION = (0, createNodeDescriptor_1.createNodeDescriptor)({
@@ -156,6 +157,10 @@ exports.QUESTION = (0, createNodeDescriptor_1.createNodeDescriptor)({
  label: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS_TYPE_OPTIONS__REGEX__LABEL",
  value: "regex",
  },
+ {
+ label: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS_TYPE_OPTIONS__LLM_ENTITY__LABEL",
+ value: "llm_entity"
+ },
  {
  label: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS_TYPE_OPTIONS__DATA__LABEL",
  value: "data",
@@ -319,6 +324,16 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  value: "llm",
  }
  },
+ {
+ key: "repromptLLMStream",
+ label: "UI__NODE_EDITOR__SERVICE__QUESTION__QUESTION__FIELDS__STREAM_REPROMPT__LABEL",
+ type: "toggle",
+ defaultValue: false,
+ condition: {
+ key: "repromptType",
+ value: "llm",
+ }
+ },
  {
  key: "repromptLLMTurns",
  label: "UI__NODE_EDITOR__SERVICE__QUESTION__QUESTION__FIELDS__TRANSCRIPT_STEPS__LABEL",
@@ -335,6 +350,17 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  value: "llm",
  }
  },
+ {
+ key: "repromptLLMStreamStopTokens",
+ type: "textArray",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STOP_TOKENS__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STOP_TOKENS__DESCRIPTION",
+ defaultValue: [".", "!", "?", "\\n"],
+ condition: {
+ key: "repromptLLMStream",
+ value: true,
+ }
+ },
  {
  key: "repromptFlowNode",
  type: "flowNode",
@@ -447,6 +473,9 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  type: "cognigyText",
  label: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__CONTEXT_KEY__LABEL",
  defaultValue: "result",
+ params: {
+ noTranslate: true
+ },
  condition: {
  key: "storeResultInContext",
  value: true
@@ -1370,6 +1399,97 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  value: "reconfirm"
  }
  },
+ {
+ key: "llmEntityExtractLLMProviderReferenceId",
+ type: "llmSelect",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__LLM_SELECT__LABEL",
+ defaultValue: "default",
+ params: {
+ required: true
+ },
+ condition: {
+ key: "type",
+ value: "llm_entity"
+ }
+ },
+ {
+ key: "entityName",
+ type: "cognigyText",
+ label: "UI__NODE_EDITOR__SERVICE__LLM_ENTITY_EXTRACT__FIELDS__ENTITY_NAME__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__LLM_ENTITY_EXTRACT__FIELDS__ENTITY_NAME__DESCRIPTION",
+ defaultValue: "customerID",
+ params: {
+ required: true
+ },
+ condition: {
+ key: "type",
+ value: "llm_entity"
+ }
+ },
+ {
+ key: "examples",
+ type: "keyValuePairs",
+ label: "UI__NODE_EDITOR__SERVICE__LLM_ENTITY_EXTRACT__FIELDS__EXAMPLES__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__LLM_ENTITY_EXTRACT__FIELDS__EXAMPLES__DESCRIPTION",
+ defaultValue: {
+ "My ID is AB54EE, is that ok?": "AB54EE",
+ "That would be ah bee see double 4 three": "ABC443",
+ "I guess it's 49 A B 8 K": "49AB8K"
+ },
+ params: {
+ keyLabel: "UI__NODE_EDITOR__SERVICE__LLM_ENTITY_EXTRACT__FIELDS__EXAMPLES__INPUT__KEY__LABEL",
+ valueLabel: "UI__NODE_EDITOR__SERVICE__LLM_ENTITY_EXTRACT__FIELDS__EXAMPLES__INPUT__VALUE__LABEL"
+ },
+ condition: {
+ key: "type",
+ value: "llm_entity"
+ }
+ },
+ {
+ key: "entityDescription",
+ type: "cognigyText",
+ label: "UI__NODE_EDITOR__SERVICE__LLM_ENTITY_EXTRACT__FIELDS__ENTITY_DESCRIPTION__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__LLM_ENTITY_EXTRACT__FIELDS__ENTITY_DESCRIPTION__DESCRIPTION",
+ defaultValue: "a customer ID, which has 6 alphanumeric characters (e.g. ABC123).",
+ params: {
+ required: true
+ },
+ condition: {
+ key: "type",
+ value: "llm_entity"
+ }
+ },
+ {
+ key: "llmEntityExtractDescription",
+ type: "description",
+ label: " ",
+ params: {
+ text: "UI__NODE_EDITOR__SERVICE__LLM_ENTITY_EXTRACT__FIELDS__DESCRIPTION__TEXT",
+ },
+ condition: {
+ key: "type",
+ value: "llm_entity"
+ }
+ },
+ {
+ key: "llmentityTemperature",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TEMPERATURE__LABEL",
+ type: "slider",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TEMPERATURE__DESCRIPTION",
+ defaultValue: 0.7,
+ params: {
+ min: 0,
+ max: 1,
+ step: 0.1
+ }
+ },
+ {
+ key: "llmentityTimeout",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TIMEOUT__LABEL",
+ defaultValue: 5000,
+ type: "number",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__TIMEOUT__DESCRIPTION",
+ },
  ...(0, datepickerUtils_1.getDatePickerFields)({
  "key": "type",
  "value": "date"
@@ -1483,7 +1603,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  },
  ],
  },
- },
+ }
  ].filter(field => !!field),
  sections: [
  {
@@ -1507,6 +1627,8 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  "repromptLLMProvider",
  "repromptLLMPrompt",
  "repromptLLMTurns",
+ "repromptLLMStream",
+ "repromptLLMStreamStopTokens",
  "repromptSay",
  "repromptFlowNode",
  "repromptParseIntents",
@@ -1653,18 +1775,38 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  "cleanTextLocale"
  ],
  },
+ {
+ key: "llmEntityExtractOptions",
+ label: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__SECTIONS__LLM_ENTITY_EXTRACT_OPTIONS__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "llmEntityExtractLLMProviderReferenceId",
+ "entityName",
+ "entityDescription",
+ "examples",
+ "additionalValidation",
+ "llmentityTemperature",
+ "llmentityTimeout"
+ ],
+ condition: {
+ key: "type",
+ value: "llm_entity"
+ },
+ },
  ...(0, datepickerUtils_1.getDatePickerSections)("Datepicker - ", {
  "key": "type",
  "value": "date"
  }),
- (0, getRephraseWithAIFields_1.getRephraseWithAISection)(),
+ (0, getRephraseWithAIFields_1.getRephraseWithAISection)()
  ].filter(section => !!section),
  form: [
  { type: "field", key: "type" },
+ { type: "field", key: "llmEntityExtractDescription" },
  { type: "field", key: "keyphraseTag" },
  { type: "field", key: "usePositiveOnly" },
  { type: "field", key: "regex" },
  { type: "field", key: "say" },
+ { type: "section", key: "llmEntityExtractOptions" },
  { type: "section", key: "context" },
  { type: "section", key: "reprompt" },
  { type: "section", key: "reconfirmation" },
@@ -1690,7 +1832,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  //#endregion DescriptorFields
  function: async ({ cognigy, nodeId, organisationId, config, inputOptions }) => {
  var _a, _b, _c;
- const { say, type, validationMessage, repromptLLMProvider, repromptType, repromptLLMPrompt, repromptLLMTurns, repromptSay, repromptFlowNode, repromptParseIntents, repromptParseKeyphrases, repromptAbsorbContext, validationRepeat, storeResultInContext, contextKey, storeInContactProfile, profileKey, storeDetailedResults, parseResultOnEntry, repromptCondition, maxExecutionDiff, resultLocation, skipRepromptOnIntent, onlyAcceptEscalationIntents, escalateAnswersAction, escalateAnswersThreshold, escalateAnswersGotoTarget, escalateAnswersExecuteTarget, escalateAnswersGotoExecutionMode, escalateAnswersInjectedText, escalateAnswersInjectedData, escalateAnswersMessage, escalateAnswersRepromptPrevention, escalateAnswersOnce, escalateAnswersHandoverText, escalateAnswersRepeatHandoverMessage, escalateAnswersHandoverCancelIntent, escalateAnswersHandoverQuickReply, escalateAnswersHandoverChatwootInboxId, escalateAnswersHandoverLiveAgentInboxId, escalateAnswersHandoverAdditionalCategoryIds, escalateAnswersHandoverSendTranscriptAsFirstMessage, escalateAnswersHandoverSalesforcePrechatEntities, escalateAnswersHandoverSalesforcePrechatDetails, escalateAnswersHandoverGenesysLanguage, escalateAnswersHandoverGenesysSkills, escalateAnswersHandoverGenesysPriority, escalateAnswersHandoverGenesysCustomAttributes, escalateAnswersHandoverEightByEightChannelId, escalateAnswersHandoverEightByEightQueueId, escalateAnswersHandoverEightByEightJSONProps, escalateAnswersHandoverSendResolveEvent, escalateAnswersHandoverResolveBehavior, escalateAnswersAgentAssistInitMessage, escalateAnswersAllowAgentInject, escalateAnswersSendOnActiveEvent, escalateAnswersSendOnQueueEvent, escalateIntentsAction, escalateIntentsValidIntents, escalateIntentsThreshold, escalateIntentsGotoTarget, escalateIntentsExecuteTarget, escalateIntentsGotoExecutionMode, escalateIntentsInjectedText, escalateIntentsInjectedData, escalateIntentsMessage, escalateIntentsHandoverText, escalateIntentsRepeatHandoverMessage, escalateIntentsHandoverCancelIntent, escalateIntentsHandoverQuickReply, escalateIntentsHandoverChatwootInboxId, escalateIntentsHandoverLiveAgentInboxId, escalateIntentsHandoverAdditionalCategoryIds, escalateIntentHandoverSendTranscriptAsFirstMessage, escalateIntentsHandoverSalesforcePrechatEntities, escalateIntentsHandoverSalesforcePrechatDetails, escalateIntentsHandoverGenesysLanguage, escalateIntentsHandoverGenesysSkills, escalateIntentsHandoverGenesysPriority, escalateIntentsHandoverGenesysCustomAttributes, escalateIntentsHandoverEightByEightChannelId, escalateIntentsHandoverEightByEightQueueId, escalateIntentsHandoverEightByEightJSONProps, escalateIntentsRepromptPrevention, escalateIntentsHandoverSendResolveEvent, escalateIntentsHandoverResolveBehavior, escalateIntentsAgentAssistInitMessage, escalateIntentsAllowAgentInject, escalateIntentsSendOnActiveEvent, escalateIntentsSendOnQueueEvent, reconfirmationBehaviour, reconfirmationQuestion, reconfirmationQuestionReprompt, handoverOutput, cleanTextLocale, cleanDisallowedSymbols, additionalAllowedCharacters, additionalSpecialPhrases, resolveSpelledOutNumbers, resolvePhoneticAlphabet, additionalPhoneticAlphabet, replaceSpecialWords, additionalMappedSymbols, resolveSpelledOutAlphabet, resolvePhoneticCounters, contractSingleCharacters, contractNumberGroups, trimResult, runNLUAfterCleaning, overwrittenBaseAnswer } = config;
+ const { say, type, validationMessage, repromptLLMProvider, repromptType = "text", repromptLLMPrompt, repromptLLMTurns, repromptLLMStream, repromptLLMStreamStopTokens, repromptSay, repromptFlowNode, repromptParseIntents, repromptParseKeyphrases, repromptAbsorbContext, validationRepeat, storeResultInContext, contextKey, storeInContactProfile, profileKey, storeDetailedResults, parseResultOnEntry, repromptCondition, maxExecutionDiff, resultLocation, skipRepromptOnIntent, onlyAcceptEscalationIntents, escalateAnswersAction, escalateAnswersThreshold, escalateAnswersGotoTarget, escalateAnswersExecuteTarget, escalateAnswersGotoExecutionMode, escalateAnswersInjectedText, escalateAnswersInjectedData, escalateAnswersMessage, escalateAnswersRepromptPrevention, escalateAnswersOnce, escalateAnswersHandoverText, escalateAnswersRepeatHandoverMessage, escalateAnswersHandoverCancelIntent, escalateAnswersHandoverQuickReply, escalateAnswersHandoverChatwootInboxId, escalateAnswersHandoverLiveAgentInboxId, escalateAnswersHandoverAdditionalCategoryIds, escalateAnswersHandoverSendTranscriptAsFirstMessage, escalateAnswersHandoverSalesforcePrechatEntities, escalateAnswersHandoverSalesforcePrechatDetails, escalateAnswersHandoverGenesysLanguage, escalateAnswersHandoverGenesysSkills, escalateAnswersHandoverGenesysPriority, escalateAnswersHandoverGenesysCustomAttributes, escalateAnswersHandoverEightByEightChannelId, escalateAnswersHandoverEightByEightQueueId, escalateAnswersHandoverEightByEightJSONProps, escalateAnswersHandoverSendResolveEvent, escalateAnswersHandoverResolveBehavior, escalateAnswersAgentAssistInitMessage, escalateAnswersAllowAgentInject, escalateAnswersSendOnActiveEvent, escalateAnswersSendOnQueueEvent, escalateIntentsAction, escalateIntentsValidIntents, escalateIntentsThreshold, escalateIntentsGotoTarget, escalateIntentsExecuteTarget, escalateIntentsGotoExecutionMode, escalateIntentsInjectedText, escalateIntentsInjectedData, escalateIntentsMessage, escalateIntentsHandoverText, escalateIntentsRepeatHandoverMessage, escalateIntentsHandoverCancelIntent, escalateIntentsHandoverQuickReply, escalateIntentsHandoverChatwootInboxId, escalateIntentsHandoverLiveAgentInboxId, escalateIntentsHandoverAdditionalCategoryIds, escalateIntentHandoverSendTranscriptAsFirstMessage, escalateIntentsHandoverSalesforcePrechatEntities, escalateIntentsHandoverSalesforcePrechatDetails, escalateIntentsHandoverGenesysLanguage, escalateIntentsHandoverGenesysSkills, escalateIntentsHandoverGenesysPriority, escalateIntentsHandoverGenesysCustomAttributes, escalateIntentsHandoverEightByEightChannelId, escalateIntentsHandoverEightByEightQueueId, escalateIntentsHandoverEightByEightJSONProps, escalateIntentsRepromptPrevention, escalateIntentsHandoverSendResolveEvent, escalateIntentsHandoverResolveBehavior, escalateIntentsAgentAssistInitMessage, escalateIntentsAllowAgentInject, escalateIntentsSendOnActiveEvent, escalateIntentsSendOnQueueEvent, reconfirmationBehaviour, reconfirmationQuestion, reconfirmationQuestionReprompt, handoverOutput, cleanTextLocale, cleanDisallowedSymbols, additionalAllowedCharacters, additionalSpecialPhrases, resolveSpelledOutNumbers, resolvePhoneticAlphabet, additionalPhoneticAlphabet, replaceSpecialWords, additionalMappedSymbols, resolveSpelledOutAlphabet, resolvePhoneticCounters, contractSingleCharacters, contractNumberGroups, trimResult, runNLUAfterCleaning, overwrittenBaseAnswer } = config;
  const { input, context, profile, api } = cognigy;
  const rephraseWithAIParams = {
  generativeAI_rephraseOutputMode: config.generativeAI_rephraseOutputMode,
@@ -1745,17 +1887,25 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  overwriteAnswer = await api.executeCognigyNLU(overwriteAnswer.text, input.data, input.inputId, { parseIntents: true, parseSlots: true, parseSystemSlots: true, findType: true });
  overwriteAnswer.text = cleanedAnswer;
  }
+ // DEBUG MODE LOGS
+ api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_1 ${overwriteAnswer.text}`, "Applied Answer Preprocessing");
  }
  else if (overwrittenBaseAnswer) {
  // if only overwrittenBaseAnswer was specified, copy the input and overwrite the text property only
  overwriteAnswer = JSON.parse(JSON.stringify(input));
  overwriteAnswer.text = overwrittenBaseAnswer;
+ api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_2 ${overwriteAnswer.text}`, "Overwritten Base Answer");
  }
  // if we're in a subsequent execution or we want to parse results
  // immediately on entry, continue with evaluation
- let result = (0, evaluateQuestionAnswer_1.evaluateQuestionAnswer)({ cognigy, config }, overwriteAnswer);
+ let result = await (0, evaluateQuestionAnswer_1.evaluateQuestionAnswer)({ cognigy, config }, overwriteAnswer);
+ // set input.result, so we can use it for validation
+ input.result = result;
  // Verify that answer is valid based on some other conditions defined in the function
  const isValid = (0, validateQuestionAnswer_1.validateQuestionAnswer)(cognigy, config);
+ if (!isValid) {
+ input.result = null;
+ }
  // compute how often this node was hit
  const executionAmount = api.getExecutionAmount(nodeId);
  // compute how long ago this node was hit
@@ -1792,7 +1942,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  // #region 0 CheckIfReconfirmationInProgress
  if (api.getSystemContext("reconfirmationQuestionInProgress")) {
  // if this is the case, we are now a YN question
- let res = (0, evaluateQuestionAnswer_1.evaluateQuestionAnswer)({
+ let res = await (0, evaluateQuestionAnswer_1.evaluateQuestionAnswer)({
  cognigy, config: {
  "type": "yesNo"
  }
@@ -1885,6 +2035,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  api.setLastExecutionMarker(nodeId, -1);
  api.resetExecutionAmount(nodeId);
  api.deleteSystemContext('activeQuestion');
+ api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_3 '${target}'`, "Skipped");
  return;
  }
  }
@@ -1957,6 +2108,8 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  api.resetExecutionAmount(nodeId);
  api.deleteSystemContext('activeQuestion');
  // #endregion 5.1.4 SetMarkersAndContinue
+ // TODO: We need a toggle for this
+ api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_4 ${typeof result === 'object' ? JSON.stringify(result) : result}`, "Found Result");
  }
  return;
  // #endregion 5.1 AnswerIsValid
@@ -2260,11 +2413,14 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  // #region 5.2.3 Reprompt
  // check if there is an extra condition defined for reprompts and check whether it was truthy
  if (sayReprompt && repromptCondition) {
- sayReprompt = !!api.parseCognigyScriptCondition(repromptCondition);
+ const repromptConditionResult = !!api.parseCognigyScriptCondition(repromptCondition);
+ !repromptConditionResult && api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_6`, "Skipping Reprompt Message");
+ sayReprompt = repromptConditionResult;
  }
  // if we decided to skip the reprompt on intent and there
  // is an intent, don't say it
  if (skipRepromptOnIntent && cognigy.input.intent) {
+ api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_5`, "Skipping Reprompt Message");
  sayReprompt = false;
  }
  // We will only output a reprompt if the user is not in the first execution
@@ -2314,12 +2470,22 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  temperature: 0.7,
  timeoutInMs: 5000,
  useCase: "promptNode",
+ stream: repromptLLMStream,
+ streamStopTokens: repromptLLMStreamStopTokens,
+ streamOnDataHandler: (text) => {
+ text = text && text.trim();
+ if (text) {
+ api.output(text, null);
+ }
+ }
  };
  if (repromptLLMProvider && repromptLLMProvider !== "default") {
  data["llmProviderReferenceId"] = repromptLLMProvider;
  }
  const repromptMessage = await api.runGenerativeAIPrompt(data, "gptPromptNode");
- await say_1.SAY.function({ cognigy, childConfigs: [], nodeId, organisationId, config: Object.assign({ handoverOutput, say: { type: "text", text: [repromptMessage] } }, rephraseWithAIParams) });
+ if (!repromptLLMStream) {
+ await say_1.SAY.function({ cognigy, childConfigs: [], nodeId, organisationId, config: Object.assign({ handoverOutput, say: { type: "text", text: [repromptMessage] } }, rephraseWithAIParams) });
+ }
  break;
  default: // this is also "text"
  await say_1.SAY.function({ cognigy, childConfigs: [], nodeId, organisationId, config: Object.assign({ handoverOutput, say: { type: "text", text: [validationMessage] } }, rephraseWithAIParams) });
@@ -3,12 +3,16 @@ Object.defineProperty(exports, "__esModule", { value: true });
  exports.evaluateQuestionAnswer = void 0;
  /* Custom modules */
  const getQuestionText_1 = require("./getQuestionText");
+ const generativeAIPrompts_1 = require("../../../../helpers/generativeAI/generativeAIPrompts");
+ const prompt_1 = require("../../../nlu/generativeSlotFiller/prompt");
  /**
  * Evaluates the answer of a question against its type
  */
- function evaluateQuestionAnswer({ cognigy, config }, overwriteAnswer) {
- var _a, _b, _c, _d, _e;
- const { type, patternType, keyphraseTag, usePositiveOnly, regex: regexField, storeDetailedResults } = config;
+ async function evaluateQuestionAnswer({ cognigy, config }, overwriteAnswer) {
+ var _a, _b, _c, _d, _e, _f;
+ const { type, keyphraseTag, usePositiveOnly, regex: regexField, storeDetailedResults,
+ // for LLM Extract functionality
+ entityName, entityDescription, examples, llmEntityExtractLLMProviderReferenceId } = config;
  const { api, input } = cognigy;
  const answer = overwriteAnswer || input;
  let result;
@@ -106,6 +110,43 @@ function evaluateQuestionAnswer({ cognigy, config }, overwriteAnswer) {
  case "phonenumber":
  result = api.matchPattern(type, answer.text, input.language);
  break;
+ case "llm_entity":
+ const prompt = (0, generativeAIPrompts_1.getLLMEntityExtractPrompt)(entityName, entityDescription, examples, answer.text);
+ const options = {
+ prompt,
+ temperature: (config === null || config === void 0 ? void 0 : config.llmentityTemperature) || 0.7,
+ maxTokens: 1000,
+ timeoutInMs: (config === null || config === void 0 ? void 0 : config.llmentityTimeout) || 5000,
+ useCase: "promptNode",
+ detailedResults: true
+ };
+ if (llmEntityExtractLLMProviderReferenceId && llmEntityExtractLLMProviderReferenceId !== "default") {
+ options["llmProviderReferenceId"] = llmEntityExtractLLMProviderReferenceId;
+ }
+ options["chat"] = (0, prompt_1.createLastConversationChatObject)(cognigy.lastConversationEntries, (0, generativeAIPrompts_1.getLLMEntityExtractSystemMessage)(entityName, entityDescription, examples), 5, true);
+ try {
+ const response = await api.runGenerativeAIPrompt(options, "gptPromptNode");
+ // find the entity in the response
+ result = (_f = response === null || response === void 0 ? void 0 : response.result) === null || _f === void 0 ? void 0 : _f[entityName];
+ if (!result) {
+ try {
+ result = JSON.parse(`{"${entityName}": ${response.result}`)[entityName];
+ }
+ catch (err) {
+ try {
+ result = JSON.parse(response.result)[entityName];
+ }
+ catch (err) {
+ result = null;
+ }
+ }
+ }
+ }
+ catch (err) {
+ api.log("error", `Error in evaluation of Question Node type LLM Entity Extract. Error was: ${err.message}`);
+ result = null;
+ }
+ break;
  case "custom":
  // the custom type is always true
  result = true;
@@ -2,12 +2,14 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.validateQuestionAnswer = void 0;
  const validateQuestionAnswer = (cognigy, config) => {
- const { additionalValidation, escalateIntentsAction, escalateIntentsThreshold, escalateIntentsValidIntents, } = config;
+ const { additionalValidation, escalateIntentsAction, escalateIntentsThreshold, escalateIntentsValidIntents } = config;
  const { input, api } = cognigy;
  let isValid = true;
  // check if there is an extra condition defined and check whether it was truthy
  if (additionalValidation) {
- isValid = !!api.parseCognigyScriptCondition(additionalValidation);
+ const additionalValidationResult = !!api.parseCognigyScriptCondition(additionalValidation);
+ !additionalValidationResult && api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_7`, "Invalid Answer");
+ isValid = additionalValidationResult;
  }
  if (escalateIntentsAction && escalateIntentsAction !== "none") {
  // Intent matched, intent score lte escalate threshold and intent is a