@cognigy/rest-api-client 0.18.0 → 0.19.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (136)
  1. package/CHANGELOG.md +6 -0
  2. package/build/apigroups/AdministrationAPIGroup_2_0.js +3 -1
  3. package/build/apigroups/MetricsAPIGroup_2_0.js +5 -0
  4. package/build/apigroups/ResourcesAPIGroup_2_0.js +9 -6
  5. package/build/shared/charts/descriptors/agentAssist/helpers/determineMetadata.js +15 -0
  6. package/build/shared/charts/descriptors/agentAssist/helpers/knowledgeSearch/answerExtraction.helper.js +1 -1
  7. package/build/shared/charts/descriptors/agentAssist/helpers/knowledgeSearch/followUpDetection.helper.js +2 -2
  8. package/build/shared/charts/descriptors/agentAssist/identityAssist.js +1 -1
  9. package/build/shared/charts/descriptors/agentAssist/knowledgeAssist.js +1 -1
  10. package/build/shared/charts/descriptors/agentAssist/nextActionAssist.js +4 -5
  11. package/build/shared/charts/descriptors/agentAssist/sentimentAssist.js +1 -1
  12. package/build/shared/charts/descriptors/agentAssist/setAdaptiveCardTile.js +2 -0
  13. package/build/shared/charts/descriptors/agentAssist/setAgentAssistGrid.js +2 -1
  14. package/build/shared/charts/descriptors/agentAssist/setHtmlTile.js +5 -3
  15. package/build/shared/charts/descriptors/agentAssist/setIframeTile.js +5 -3
  16. package/build/shared/charts/descriptors/agentAssist/setSecureFormsTile.js +2 -2
  17. package/build/shared/charts/descriptors/agentAssist/transcriptAssist.js +2 -1
  18. package/build/shared/charts/descriptors/analytics/activateProfile.js +1 -0
  19. package/build/shared/charts/descriptors/analytics/blindMode.js +2 -0
  20. package/build/shared/charts/descriptors/analytics/completeGoal.js +1 -0
  21. package/build/shared/charts/descriptors/analytics/deactivateProfile.js +1 -0
  22. package/build/shared/charts/descriptors/analytics/deleteProfile.js +1 -0
  23. package/build/shared/charts/descriptors/analytics/index.js +3 -1
  24. package/build/shared/charts/descriptors/analytics/mergeProfile.js +1 -0
  25. package/build/shared/charts/descriptors/analytics/overwriteAnalytics.js +9 -0
  26. package/build/shared/charts/descriptors/analytics/setRating.js +4 -2
  27. package/build/shared/charts/descriptors/analytics/trackMilestone.js +95 -0
  28. package/build/shared/charts/descriptors/analytics/updateProfile.js +1 -0
  29. package/build/shared/charts/descriptors/apps/initAppSession.js +1 -0
  30. package/build/shared/charts/descriptors/apps/setAdaptiveCardAppState.js +35 -10
  31. package/build/shared/charts/descriptors/apps/setHtmlAppState.js +25 -2
  32. package/build/shared/charts/descriptors/apps/utils/getXAppsOverlaySettings.js +54 -0
  33. package/build/shared/charts/descriptors/connectionNodes/documentParserProviders/azureAIDocumentIntelligenceConnection.js +12 -0
  34. package/build/shared/charts/descriptors/connectionNodes/documentParserProviders/index.js +13 -0
  35. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/awsBedrockProviderConnection.js +12 -0
  36. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/azureOpenAIProviderConnection.js +4 -3
  37. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/azureOpenAIProviderConnectionV2.js +3 -3
  38. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/azureOpenAIProviderOauth2Connection.js +14 -0
  39. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/index.js +16 -8
  40. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/openAIProviderConnection.js +3 -3
  41. package/build/shared/charts/descriptors/data/addToContext.js +7 -0
  42. package/build/shared/charts/descriptors/data/copyDataToContext.js +3 -0
  43. package/build/shared/charts/descriptors/data/copySlotsToContext.js +3 -0
  44. package/build/shared/charts/descriptors/data/debugMessage.js +73 -0
  45. package/build/shared/charts/descriptors/data/index.js +3 -1
  46. package/build/shared/charts/descriptors/data/removeFromContext.js +9 -1
  47. package/build/shared/charts/descriptors/data/resetContext.js +1 -0
  48. package/build/shared/charts/descriptors/index.js +7 -1
  49. package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +35 -10
  50. package/build/shared/charts/descriptors/logic/resetState.js +1 -0
  51. package/build/shared/charts/descriptors/logic/setState.js +2 -1
  52. package/build/shared/charts/descriptors/logic/setTranslation.js +3 -1
  53. package/build/shared/charts/descriptors/logic/switchLocale.js +1 -0
  54. package/build/shared/charts/descriptors/logic/think.js +3 -1
  55. package/build/shared/charts/descriptors/logic/thinkV2.js +2 -0
  56. package/build/shared/charts/descriptors/message/question/question.js +50 -5
  57. package/build/shared/charts/descriptors/message/question/utils/validateQuestionAnswer.js +4 -2
  58. package/build/shared/charts/descriptors/nlu/cleanText.js +1 -0
  59. package/build/shared/charts/descriptors/nlu/fuzzySearch.js +23 -1
  60. package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +19 -9
  61. package/build/shared/charts/descriptors/service/GPTPrompt.js +59 -24
  62. package/build/shared/charts/descriptors/service/LLMEntityExtract.js +11 -2
  63. package/build/shared/charts/descriptors/service/handoverV2.js +84 -1
  64. package/build/shared/charts/descriptors/service/httpRequest.js +35 -2
  65. package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +96 -17
  66. package/build/shared/charts/descriptors/voice/nodes/bargeIn.js +2 -0
  67. package/build/shared/charts/descriptors/voice/nodes/continuousAsr.js +2 -0
  68. package/build/shared/charts/descriptors/voice/nodes/dtmf.js +2 -0
  69. package/build/shared/charts/descriptors/voice/nodes/muteSpeechInput.js +1 -0
  70. package/build/shared/charts/descriptors/voice/nodes/noUserInput.js +2 -0
  71. package/build/shared/charts/descriptors/voice/nodes/sessionSpeechParameters.js +2 -0
  72. package/build/shared/charts/descriptors/voice/nodes/transfer.js +2 -0
  73. package/build/shared/charts/descriptors/voicegateway2/nodes/dtmf.js +2 -0
  74. package/build/shared/charts/descriptors/voicegateway2/nodes/hangup.js +2 -0
  75. package/build/shared/charts/descriptors/voicegateway2/nodes/muteSpeechInput.js +3 -0
  76. package/build/shared/charts/descriptors/voicegateway2/nodes/play.js +1 -0
  77. package/build/shared/charts/descriptors/voicegateway2/nodes/record.js +1 -0
  78. package/build/shared/charts/descriptors/voicegateway2/nodes/refer.js +1 -0
  79. package/build/shared/charts/descriptors/voicegateway2/nodes/sendMetadata.js +1 -0
  80. package/build/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +298 -8
  81. package/build/shared/charts/descriptors/voicegateway2/nodes/transfer.js +10 -6
  82. package/build/shared/charts/descriptors/voicegateway2/utils/helper.js +2 -2
  83. package/build/shared/charts/helpers/generativeAI/rephraseSentenceWithAi.js +4 -2
  84. package/build/shared/constants.js +10 -1
  85. package/build/shared/handoverClients/interfaces/THandoverEventType.js +1 -0
  86. package/build/shared/helper/logFullConfigToDebugMode.js +30 -0
  87. package/build/shared/helper/nlu/textCleaner.js +3 -1
  88. package/build/shared/interfaces/IOrganisation.js +1 -0
  89. package/build/shared/interfaces/IProfile.js +1 -0
  90. package/build/shared/interfaces/IProfileSchema.js +3 -0
  91. package/build/shared/interfaces/analytics/IAnalyticsSourceData.js +20 -20
  92. package/build/shared/interfaces/{restAPI/resources/journey/v2.0/IJourneyStep_2_0.js → analytics/IMilestoneAnalytics.js} +1 -1
  93. package/build/shared/interfaces/{restAPI/resources/journey/v2.0/IJourneyProgress_2_0.js → appsession/ISetAppStateOptions.js} +1 -1
  94. package/build/shared/interfaces/{restAPI/resources/journey/v2.0/IJourneyIndexItem_2_0.js → appsession/ISetAppStateOverlaySettings.js} +1 -1
  95. package/build/shared/interfaces/appsession/ISetAppStateOverlaySettingsMetaData.js +3 -0
  96. package/build/shared/interfaces/{restAPI/resources/journey/v2.0/IIndexJourneysRest_2_0.js → debugEvents/TDebugEventMessagePayload.js} +1 -1
  97. package/build/shared/interfaces/debugEvents/TDebugEventType.js +2 -0
  98. package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +33 -69
  99. package/build/shared/interfaces/handover.js +42 -1
  100. package/build/shared/interfaces/license.js +3 -2
  101. package/build/shared/interfaces/license.js.map +1 -1
  102. package/build/shared/interfaces/messageAPI/endpoints.js +2 -0
  103. package/build/shared/interfaces/messageAPI/handover.js +20 -1
  104. package/build/shared/interfaces/resources/IConnection.js +1 -0
  105. package/build/shared/interfaces/resources/IEndpoint.js +1 -2
  106. package/build/shared/interfaces/resources/ILargeLanguageModel.js +25 -4
  107. package/build/shared/interfaces/resources/IMilestone.js +1 -1
  108. package/build/shared/interfaces/resources/INodeDescriptorSet.js +12 -0
  109. package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeSource.js +1 -1
  110. package/build/shared/interfaces/resources/settings/IAgentSettings.js +12 -7
  111. package/build/shared/interfaces/resources/settings/IGenerativeAISettings.js +4 -0
  112. package/build/shared/interfaces/resources/settings/IKnowledgeAISettings.js +18 -0
  113. package/build/shared/interfaces/resources/settings/index.js +4 -1
  114. package/build/shared/interfaces/restAPI/administration/organisations/v2.0/IReadCollectionsToBeDeletedRest_2_0.js +0 -1
  115. package/build/shared/interfaces/restAPI/administration/organisations/v2.0/IReadOrganisationKnowledgeChunksCountRest_2_0.js +3 -0
  116. package/build/shared/interfaces/restAPI/operations/nlu/v2.0/IGenerateNluScoresRest_2_0.js +57 -0
  117. package/build/shared/interfaces/restAPI/{resources/journey/v2.0/IJourney_2_0.js → operations/nlu/v2.0/index.js} +1 -1
  118. package/build/shared/interfaces/restAPI/resources/largeLanguageModel/v2.0/IAvailableModelsForLLMProvider_2_0 .js +18 -0
  119. package/build/shared/interfaces/restAPI/resources/largeLanguageModel/v2.0/IGetAvailableModelsForLLMRest_2_0 .js +3 -0
  120. package/build/shared/interfaces/security/IRole.js +2 -0
  121. package/build/shared/interfaces/security/ISystemCapabilities.js +3 -0
  122. package/build/shared/interfaces/security/index.js +1 -1
  123. package/build/shared/interfaces/trainer/ITrainerRecord.js +2 -2
  124. package/build/shared/interfaces/user.js +1 -1
  125. package/package.json +2 -2
  126. package/types/index.d.ts +1254 -670
  127. package/build/shared/interfaces/journeys/IJourney.js +0 -83
  128. package/build/shared/interfaces/journeys/IJourneyProgress.js +0 -40
  129. package/build/shared/interfaces/journeys/IJourneyTrackEvent.js +0 -35
  130. package/build/shared/interfaces/journeys/index.js +0 -14
  131. package/build/shared/interfaces/restAPI/resources/journey/v2.0/IJourneyTrackEvents_2_0.js +0 -3
  132. package/build/shared/interfaces/restAPI/resources/journey/v2.0/IReadJourneyProgressRest_2_0.js +0 -3
  133. package/build/shared/interfaces/restAPI/resources/journey/v2.0/IReadJourneyRest_2_0.js +0 -3
  134. package/build/shared/interfaces/restAPI/resources/journey/v2.0/ITrackJourneyEventRest_2_0.js +0 -3
  135. package/build/shared/interfaces/restAPI/resources/journey/v2.0/IUpdateSelectedJourneyRest_2_0.js +0 -3
  136. /package/build/shared/interfaces/restAPI/{resources/journey/v2.0 → operations}/index.js +0 -0
@@ -196,6 +196,25 @@ exports.SEARCH_EXTRACT_OUTPUT = (0, createNodeDescriptor_1.createNodeDescriptor)
196
196
  tagLimit: 5
197
197
  }
198
198
  },
199
+ {
200
+ key: "searchSourceTagsFilterOp",
201
+ type: "select",
202
+ label: "UI__NODE_EDITOR__KNOWLEDGE_SEARCH__SOURCE_TAGS_FILTER_OP__LABEL",
203
+ description: "UI__NODE_EDITOR__SEARCH_EXTRACT_OUTPUT__FIELDS__SOURCE_TAGS_FILTER_OP__DESCRIPTION",
204
+ defaultValue: "and",
205
+ params: {
206
+ options: [
207
+ {
208
+ label: "UI__NODE_EDITOR__SEARCH_EXTRACT_OUTPUT__FIELDS__SOURCE_TAGS_FILTER_OP__OPTIONS__AND__LABEL",
209
+ value: "and"
210
+ },
211
+ {
212
+ label: "UI__NODE_EDITOR__SEARCH_EXTRACT_OUTPUT__FIELDS__SOURCE_TAGS_FILTER_OP__OPTIONS__OR__LABEL",
213
+ value: "or"
214
+ },
215
+ ]
216
+ }
217
+ },
199
218
  {
200
219
  key: "prompt",
201
220
  label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__PROMPT__LABEL",
@@ -451,7 +470,7 @@ exports.SEARCH_EXTRACT_OUTPUT = (0, createNodeDescriptor_1.createNodeDescriptor)
451
470
  key: "searchSettings",
452
471
  label: "Search Settings",
453
472
  defaultCollapsed: true,
454
- fields: ["followUpDetection", "followUpDetectionSteps", "searchString", "searchStringDescription", "topK", "searchStoreLocation", "searchStoreWarning", "searchStoreLocationInputKey", "searchStoreLocationContextKey", "searchSourceTags"]
473
+ fields: ["followUpDetection", "followUpDetectionSteps", "searchString", "searchStringDescription", "topK", "searchStoreLocation", "searchStoreWarning", "searchStoreLocationInputKey", "searchStoreLocationContextKey", "searchSourceTags", "searchSourceTagsFilterOp"]
455
474
  },
456
475
  {
457
476
  key: "extractSettings",
@@ -532,7 +551,7 @@ exports.SEARCH_EXTRACT_OUTPUT = (0, createNodeDescriptor_1.createNodeDescriptor)
532
551
  var _a, _b, _c, _d, _e, _f, _g, _h, _j;
533
552
  const { cognigy, config, nodeId } = knowledgeSearchParams;
534
553
  const { input, api } = cognigy;
535
- const { topK, searchString, searchStoreLocation, searchStoreLocationContextKey, searchStoreLocationInputKey, searchSourceTags, temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, timeoutMessage, outputFallback, outputMode, mode, errorHandling, errorHandlingGotoTarget, streamStopTokens, followUpDetection, debugLogTokenCount, debugLogRequestAndCompletion } = config;
554
+ const { topK, searchString, searchStoreLocation, searchStoreLocationContextKey, searchStoreLocationInputKey, searchSourceTags, searchSourceTagsFilterOp, temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, timeoutMessage, outputFallback, outputMode, mode, errorHandling, errorHandlingGotoTarget, streamStopTokens, followUpDetection, debugLogTokenCount, debugLogRequestAndCompletion } = config;
536
555
  let { followUpDetectionSteps } = config;
537
556
  let { prompt } = config;
538
557
  // timeout message name not changed because of legacy compatibility
@@ -554,6 +573,7 @@ exports.SEARCH_EXTRACT_OUTPUT = (0, createNodeDescriptor_1.createNodeDescriptor)
554
573
  }
555
574
  let actualSearchString = searchString;
556
575
  // check if follow up detection is active and if yes, handle accordingly
576
+ // this is "context aware search"
557
577
  if (followUpDetection === "transcript") {
558
578
  let prompt;
559
579
  let lastRoundTrip;
@@ -574,9 +594,9 @@ Does the last USER input refer to the conversation before?
574
594
  Answer with "true" or "false". Answer:`;
575
595
  let promptResponse;
576
596
  try {
577
- promptResponse = await api.runGenerativeAIPrompt({ prompt }, "gptPromptNode");
597
+ promptResponse = await api.runGenerativeAIPrompt({ prompt }, "answerExtraction");
578
598
  // if we're in adminconsole, process debugging options
579
- input.channel === "adminconsole" && (0, prompt_1.writeLLMDebugLogs)("Search Extract Output Follow Up Detection", prompt, promptResponse, debugLogTokenCount, false, cognigy);
599
+ (0, prompt_1.writeLLMDebugLogs)("Search Extract Output Follow Up Detection", prompt, promptResponse, debugLogTokenCount, false, cognigy);
580
600
  // check if LLM thinks the input was a follow up
581
601
  if (promptResponse === null || promptResponse === void 0 ? void 0 : promptResponse.toLowerCase().includes("true")) {
582
602
  prompt = `You are tasked to rewrite a question based on a context, so that the question is clearer.
@@ -593,11 +613,12 @@ Context:
593
613
  ${lastRoundTrip}
594
614
  Question: ${searchString}
595
615
  New: `;
596
- promptResponse = await api.runGenerativeAIPrompt({ prompt }, "gptPromptNode");
616
+ promptResponse = await api.runGenerativeAIPrompt({ prompt }, "answerExtraction");
597
617
  // if we're in adminconsole, process debugging options
598
- input.channel === "adminconsole" && (0, prompt_1.writeLLMDebugLogs)("Search Extract Output Follow Up Detection 2", prompt, promptResponse, debugLogTokenCount, false, cognigy);
618
+ (0, prompt_1.writeLLMDebugLogs)("Search Extract Output Follow Up Detection 2", prompt, promptResponse, debugLogTokenCount, false, cognigy);
599
619
  // the actual search string to now use is the rewritten question
600
620
  actualSearchString = promptResponse;
621
+ api.logDebugMessage(`UI__DEBUG_MODE__SEO__MESSAGE '${actualSearchString}'`);
601
622
  }
602
623
  }
603
624
  catch (err) {
@@ -612,6 +633,7 @@ New: `;
612
633
  const compactError = {
613
634
  message: (error === null || error === void 0 ? void 0 : error.message) || error,
614
635
  };
636
+ api.logDebugError(JSON.stringify(compactError, undefined, 2), "Search Extract Output: Error");
615
637
  if ((_a = error === null || error === void 0 ? void 0 : error.originalErrorDetails) === null || _a === void 0 ? void 0 : _a.code) {
616
638
  compactError["code"] = error.originalErrorDetails.code;
617
639
  }
@@ -673,7 +695,10 @@ New: `;
673
695
  searchSourceTags.forEach((tag, index) => {
674
696
  searchSourceTags[index] = tag.toString();
675
697
  });
676
- data.tags = searchSourceTags;
698
+ data.tagsData = {
699
+ tags: searchSourceTags,
700
+ op: searchSourceTagsFilterOp || "and" // default to "and" if not set (backwards compatibility)
701
+ };
677
702
  }
678
703
  // Add knowledgeStoreIds to data
679
704
  if (knowledgeStoreId) {
@@ -682,7 +707,7 @@ New: `;
682
707
  // Perform knowledge search
683
708
  try {
684
709
  const knowledgeSearchResponse = await api.knowledgeSearch(data);
685
- input.channel === "adminconsole" && (0, prompt_1.writeLLMDebugLogs)("Search Extract Output Embeddings Call", data.query, undefined, debugLogTokenCount, false, cognigy);
710
+ (0, prompt_1.writeLLMDebugLogs)("Search Extract Output Embeddings Call", data.query, undefined, debugLogTokenCount, false, cognigy);
686
711
  // Handle possible response errors
687
712
  if ((knowledgeSearchResponse === null || knowledgeSearchResponse === void 0 ? void 0 : knowledgeSearchResponse.status) !== "success") {
688
713
  const errorMessage = (knowledgeSearchResponse === null || knowledgeSearchResponse === void 0 ? void 0 : knowledgeSearchResponse.error) || "empty";
@@ -750,9 +775,9 @@ New: `;
750
775
  }
751
776
  let promptResponse;
752
777
  try {
753
- promptResponse = await api.runGenerativeAIPrompt(promptData, "gptPromptNode");
778
+ promptResponse = await api.runGenerativeAIPrompt(promptData, "answerExtraction");
754
779
  // if we're in adminconsole, process debugging options
755
- input.channel === "adminconsole" && (0, prompt_1.writeLLMDebugLogs)("Search Extract Output Main Prompt", prompt, promptResponse, debugLogTokenCount, debugLogRequestAndCompletion, cognigy);
780
+ (0, prompt_1.writeLLMDebugLogs)("Search Extract Output Main Prompt", prompt, promptResponse, debugLogTokenCount, debugLogRequestAndCompletion, cognigy);
756
781
  if (storeLocation === "context") {
757
782
  api.addToContext(contextKey, promptResponse, "simple");
758
783
  }
@@ -22,6 +22,7 @@ exports.RESET_STATE = (0, createNodeDescriptor_1.createNodeDescriptor)({
22
22
  const { api, input } = cognigy;
23
23
  const state = await api.resetState();
24
24
  input.state = state;
25
+ api.logDebugMessage(`UI__DEBUG_MODE__RESET_STATE__MESSAGE`);
25
26
  }
26
27
  });
27
28
  //# sourceMappingURL=resetState.js.map
@@ -30,7 +30,7 @@ exports.SET_STATE = (0, createNodeDescriptor_1.createNodeDescriptor)({
30
30
  {
31
31
  key: "data",
32
32
  type: "json",
33
- label: "UI__NODE_EDITOR__SET_STATE__DATA__LABEL "
33
+ label: "UI__NODE_EDITOR__SET_STATE__DATA__LABEL"
34
34
  }
35
35
  ],
36
36
  preview: {
@@ -43,6 +43,7 @@ exports.SET_STATE = (0, createNodeDescriptor_1.createNodeDescriptor)({
43
43
  const { input, api } = cognigy;
44
44
  api.setState(state);
45
45
  input.state = state;
46
+ api.logDebugMessage(`UI__DEBUG_MODE__SET_STATE__MESSAGE ${state}`);
46
47
  if (text || data) {
47
48
  api.think(text, data);
48
49
  }
@@ -3,6 +3,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.SET_TRANSLATION = void 0;
4
4
  /** Custom Modules */
5
5
  const createNodeDescriptor_1 = require("../../createNodeDescriptor");
6
+ const logFullConfigToDebugMode_1 = require("../../../helper/logFullConfigToDebugMode");
6
7
  exports.SET_TRANSLATION = (0, createNodeDescriptor_1.createNodeDescriptor)({
7
8
  type: "setTranslation",
8
9
  defaultLabel: "Set Translation",
@@ -72,7 +73,7 @@ exports.SET_TRANSLATION = (0, createNodeDescriptor_1.createNodeDescriptor)({
72
73
  { type: "field", key: "alwaysRemoveNoTranslateMarker" },
73
74
  ],
74
75
  function: async ({ cognigy, config }) => {
75
- const { api } = cognigy;
76
+ const { api, input } = cognigy;
76
77
  const { translationEnabled, flowLanguage, inputLanguage, padPayloads, alwaysRemoveNoTranslateMarker, noTranslateMarker, setInputLanguageOnExecutionCount } = config;
77
78
  api.setTranslationSettings({
78
79
  translationEnabled,
@@ -83,6 +84,7 @@ exports.SET_TRANSLATION = (0, createNodeDescriptor_1.createNodeDescriptor)({
83
84
  noTranslateMarker,
84
85
  setInputLanguageOnExecutionCount
85
86
  });
87
+ (0, logFullConfigToDebugMode_1.logFullConfigToDebugMode)(cognigy, config);
86
88
  }
87
89
  });
88
90
  //# sourceMappingURL=setTranslation.js.map
@@ -59,6 +59,7 @@ exports.SWITCH_LOCALE = (0, createNodeDescriptor_1.createNodeDescriptor)({
59
59
  else {
60
60
  await api.setLocaleReferenceId(localeReferenceId);
61
61
  }
62
+ api.logDebugMessage(`UI__DEBUG_MODE__SWITCH_LOCALE__MESSAGE`);
62
63
  }
63
64
  });
64
65
  //# sourceMappingURL=switchLocale.js.map
@@ -3,6 +3,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.THINK = void 0;
4
4
  /* Custom modules */
5
5
  const createNodeDescriptor_1 = require("../../createNodeDescriptor");
6
+ const logFullConfigToDebugMode_1 = require("../../../helper/logFullConfigToDebugMode");
6
7
  /**
7
8
  * Node name: 'think'
8
9
  *
@@ -93,10 +94,11 @@ exports.THINK = (0, createNodeDescriptor_1.createNodeDescriptor)({
93
94
  key: "text",
94
95
  type: "text",
95
96
  },
96
- tags: ["basic", "logic", "recursion", "inject"],
97
+ tags: ["logic", "recursion", "inject"],
97
98
  function: async ({ cognigy, config }) => {
98
99
  const { text, data, intent, thinkType, } = config;
99
100
  const { api } = cognigy;
101
+ (0, logFullConfigToDebugMode_1.logFullConfigToDebugMode)(cognigy, config);
100
102
  if (thinkType === "intent") {
101
103
  api.think(`cIntent:${intent}`, null);
102
104
  }
@@ -3,6 +3,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.THINK_V2 = void 0;
4
4
  /* Custom modules */
5
5
  const createNodeDescriptor_1 = require("../../createNodeDescriptor");
6
+ const logFullConfigToDebugMode_1 = require("../../../helper/logFullConfigToDebugMode");
6
7
  /**
7
8
  * Node name: 'think'
8
9
  *
@@ -93,6 +94,7 @@ exports.THINK_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
93
94
  function: async ({ cognigy, config }) => {
94
95
  const { text, data, intent, thinkType, } = config;
95
96
  const { api } = cognigy;
97
+ (0, logFullConfigToDebugMode_1.logFullConfigToDebugMode)(cognigy, config);
96
98
  if (thinkType === "intent") {
97
99
  api.thinkV2(`cIntent:${intent}`, null);
98
100
  }
@@ -324,6 +324,16 @@ DO NOT talk about other topics. Do not offer general assistance.`,
324
324
  value: "llm",
325
325
  }
326
326
  },
327
+ {
328
+ key: "repromptLLMStream",
329
+ label: "UI__NODE_EDITOR__SERVICE__QUESTION__QUESTION__FIELDS__STREAM_REPROMPT__LABEL",
330
+ type: "toggle",
331
+ defaultValue: false,
332
+ condition: {
333
+ key: "repromptType",
334
+ value: "llm",
335
+ }
336
+ },
327
337
  {
328
338
  key: "repromptLLMTurns",
329
339
  label: "UI__NODE_EDITOR__SERVICE__QUESTION__QUESTION__FIELDS__TRANSCRIPT_STEPS__LABEL",
@@ -340,6 +350,17 @@ DO NOT talk about other topics. Do not offer general assistance.`,
340
350
  value: "llm",
341
351
  }
342
352
  },
353
+ {
354
+ key: "repromptLLMStreamStopTokens",
355
+ type: "textArray",
356
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STOP_TOKENS__LABEL",
357
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STOP_TOKENS__DESCRIPTION",
358
+ defaultValue: [".", "!", "?", "\\n"],
359
+ condition: {
360
+ key: "repromptLLMStream",
361
+ value: true,
362
+ }
363
+ },
343
364
  {
344
365
  key: "repromptFlowNode",
345
366
  type: "flowNode",
@@ -452,6 +473,9 @@ DO NOT talk about other topics. Do not offer general assistance.`,
452
473
  type: "cognigyText",
453
474
  label: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__CONTEXT_KEY__LABEL",
454
475
  defaultValue: "result",
476
+ params: {
477
+ noTranslate: true
478
+ },
455
479
  condition: {
456
480
  key: "storeResultInContext",
457
481
  value: true
@@ -1579,7 +1603,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
1579
1603
  },
1580
1604
  ],
1581
1605
  },
1582
- },
1606
+ }
1583
1607
  ].filter(field => !!field),
1584
1608
  sections: [
1585
1609
  {
@@ -1603,6 +1627,8 @@ DO NOT talk about other topics. Do not offer general assistance.`,
1603
1627
  "repromptLLMProvider",
1604
1628
  "repromptLLMPrompt",
1605
1629
  "repromptLLMTurns",
1630
+ "repromptLLMStream",
1631
+ "repromptLLMStreamStopTokens",
1606
1632
  "repromptSay",
1607
1633
  "repromptFlowNode",
1608
1634
  "repromptParseIntents",
@@ -1771,7 +1797,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
1771
1797
  "key": "type",
1772
1798
  "value": "date"
1773
1799
  }),
1774
- (0, getRephraseWithAIFields_1.getRephraseWithAISection)(),
1800
+ (0, getRephraseWithAIFields_1.getRephraseWithAISection)()
1775
1801
  ].filter(section => !!section),
1776
1802
  form: [
1777
1803
  { type: "field", key: "type" },
@@ -1806,7 +1832,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
1806
1832
  //#endregion DescriptorFields
1807
1833
  function: async ({ cognigy, nodeId, organisationId, config, inputOptions }) => {
1808
1834
  var _a, _b, _c;
1809
- const { say, type, validationMessage, repromptLLMProvider, repromptType, repromptLLMPrompt, repromptLLMTurns, repromptSay, repromptFlowNode, repromptParseIntents, repromptParseKeyphrases, repromptAbsorbContext, validationRepeat, storeResultInContext, contextKey, storeInContactProfile, profileKey, storeDetailedResults, parseResultOnEntry, repromptCondition, maxExecutionDiff, resultLocation, skipRepromptOnIntent, onlyAcceptEscalationIntents, escalateAnswersAction, escalateAnswersThreshold, escalateAnswersGotoTarget, escalateAnswersExecuteTarget, escalateAnswersGotoExecutionMode, escalateAnswersInjectedText, escalateAnswersInjectedData, escalateAnswersMessage, escalateAnswersRepromptPrevention, escalateAnswersOnce, escalateAnswersHandoverText, escalateAnswersRepeatHandoverMessage, escalateAnswersHandoverCancelIntent, escalateAnswersHandoverQuickReply, escalateAnswersHandoverChatwootInboxId, escalateAnswersHandoverLiveAgentInboxId, escalateAnswersHandoverAdditionalCategoryIds, escalateAnswersHandoverSendTranscriptAsFirstMessage, escalateAnswersHandoverSalesforcePrechatEntities, escalateAnswersHandoverSalesforcePrechatDetails, escalateAnswersHandoverGenesysLanguage, escalateAnswersHandoverGenesysSkills, escalateAnswersHandoverGenesysPriority, escalateAnswersHandoverGenesysCustomAttributes, escalateAnswersHandoverEightByEightChannelId, escalateAnswersHandoverEightByEightQueueId, escalateAnswersHandoverEightByEightJSONProps, escalateAnswersHandoverSendResolveEvent, escalateAnswersHandoverResolveBehavior, escalateAnswersAgentAssistInitMessage, escalateAnswersAllowAgentInject, escalateAnswersSendOnActiveEvent, escalateAnswersSendOnQueueEvent, escalateIntentsAction, escalateIntentsValidIntents, escalateIntentsThreshold, escalateIntentsGotoTarget, escalateIntentsExecuteTarget, escalateIntentsGotoExecutionMode, escalateIntentsInjectedText, escalateIntentsInjectedData, escalateIntentsMessage, escalateIntentsHandoverText, escalateIntentsRepeatHandoverMessage, 
escalateIntentsHandoverCancelIntent, escalateIntentsHandoverQuickReply, escalateIntentsHandoverChatwootInboxId, escalateIntentsHandoverLiveAgentInboxId, escalateIntentsHandoverAdditionalCategoryIds, escalateIntentHandoverSendTranscriptAsFirstMessage, escalateIntentsHandoverSalesforcePrechatEntities, escalateIntentsHandoverSalesforcePrechatDetails, escalateIntentsHandoverGenesysLanguage, escalateIntentsHandoverGenesysSkills, escalateIntentsHandoverGenesysPriority, escalateIntentsHandoverGenesysCustomAttributes, escalateIntentsHandoverEightByEightChannelId, escalateIntentsHandoverEightByEightQueueId, escalateIntentsHandoverEightByEightJSONProps, escalateIntentsRepromptPrevention, escalateIntentsHandoverSendResolveEvent, escalateIntentsHandoverResolveBehavior, escalateIntentsAgentAssistInitMessage, escalateIntentsAllowAgentInject, escalateIntentsSendOnActiveEvent, escalateIntentsSendOnQueueEvent, reconfirmationBehaviour, reconfirmationQuestion, reconfirmationQuestionReprompt, handoverOutput, cleanTextLocale, cleanDisallowedSymbols, additionalAllowedCharacters, additionalSpecialPhrases, resolveSpelledOutNumbers, resolvePhoneticAlphabet, additionalPhoneticAlphabet, replaceSpecialWords, additionalMappedSymbols, resolveSpelledOutAlphabet, resolvePhoneticCounters, contractSingleCharacters, contractNumberGroups, trimResult, runNLUAfterCleaning, overwrittenBaseAnswer } = config;
1835
+ const { say, type, validationMessage, repromptLLMProvider, repromptType = "text", repromptLLMPrompt, repromptLLMTurns, repromptLLMStream, repromptLLMStreamStopTokens, repromptSay, repromptFlowNode, repromptParseIntents, repromptParseKeyphrases, repromptAbsorbContext, validationRepeat, storeResultInContext, contextKey, storeInContactProfile, profileKey, storeDetailedResults, parseResultOnEntry, repromptCondition, maxExecutionDiff, resultLocation, skipRepromptOnIntent, onlyAcceptEscalationIntents, escalateAnswersAction, escalateAnswersThreshold, escalateAnswersGotoTarget, escalateAnswersExecuteTarget, escalateAnswersGotoExecutionMode, escalateAnswersInjectedText, escalateAnswersInjectedData, escalateAnswersMessage, escalateAnswersRepromptPrevention, escalateAnswersOnce, escalateAnswersHandoverText, escalateAnswersRepeatHandoverMessage, escalateAnswersHandoverCancelIntent, escalateAnswersHandoverQuickReply, escalateAnswersHandoverChatwootInboxId, escalateAnswersHandoverLiveAgentInboxId, escalateAnswersHandoverAdditionalCategoryIds, escalateAnswersHandoverSendTranscriptAsFirstMessage, escalateAnswersHandoverSalesforcePrechatEntities, escalateAnswersHandoverSalesforcePrechatDetails, escalateAnswersHandoverGenesysLanguage, escalateAnswersHandoverGenesysSkills, escalateAnswersHandoverGenesysPriority, escalateAnswersHandoverGenesysCustomAttributes, escalateAnswersHandoverEightByEightChannelId, escalateAnswersHandoverEightByEightQueueId, escalateAnswersHandoverEightByEightJSONProps, escalateAnswersHandoverSendResolveEvent, escalateAnswersHandoverResolveBehavior, escalateAnswersAgentAssistInitMessage, escalateAnswersAllowAgentInject, escalateAnswersSendOnActiveEvent, escalateAnswersSendOnQueueEvent, escalateIntentsAction, escalateIntentsValidIntents, escalateIntentsThreshold, escalateIntentsGotoTarget, escalateIntentsExecuteTarget, escalateIntentsGotoExecutionMode, escalateIntentsInjectedText, escalateIntentsInjectedData, escalateIntentsMessage, 
escalateIntentsHandoverText, escalateIntentsRepeatHandoverMessage, escalateIntentsHandoverCancelIntent, escalateIntentsHandoverQuickReply, escalateIntentsHandoverChatwootInboxId, escalateIntentsHandoverLiveAgentInboxId, escalateIntentsHandoverAdditionalCategoryIds, escalateIntentHandoverSendTranscriptAsFirstMessage, escalateIntentsHandoverSalesforcePrechatEntities, escalateIntentsHandoverSalesforcePrechatDetails, escalateIntentsHandoverGenesysLanguage, escalateIntentsHandoverGenesysSkills, escalateIntentsHandoverGenesysPriority, escalateIntentsHandoverGenesysCustomAttributes, escalateIntentsHandoverEightByEightChannelId, escalateIntentsHandoverEightByEightQueueId, escalateIntentsHandoverEightByEightJSONProps, escalateIntentsRepromptPrevention, escalateIntentsHandoverSendResolveEvent, escalateIntentsHandoverResolveBehavior, escalateIntentsAgentAssistInitMessage, escalateIntentsAllowAgentInject, escalateIntentsSendOnActiveEvent, escalateIntentsSendOnQueueEvent, reconfirmationBehaviour, reconfirmationQuestion, reconfirmationQuestionReprompt, handoverOutput, cleanTextLocale, cleanDisallowedSymbols, additionalAllowedCharacters, additionalSpecialPhrases, resolveSpelledOutNumbers, resolvePhoneticAlphabet, additionalPhoneticAlphabet, replaceSpecialWords, additionalMappedSymbols, resolveSpelledOutAlphabet, resolvePhoneticCounters, contractSingleCharacters, contractNumberGroups, trimResult, runNLUAfterCleaning, overwrittenBaseAnswer } = config;
1810
1836
  const { input, context, profile, api } = cognigy;
1811
1837
  const rephraseWithAIParams = {
1812
1838
  generativeAI_rephraseOutputMode: config.generativeAI_rephraseOutputMode,
@@ -1861,11 +1887,14 @@ DO NOT talk about other topics. Do not offer general assistance.`,
1861
1887
  overwriteAnswer = await api.executeCognigyNLU(overwriteAnswer.text, input.data, input.inputId, { parseIntents: true, parseSlots: true, parseSystemSlots: true, findType: true });
1862
1888
  overwriteAnswer.text = cleanedAnswer;
1863
1889
  }
1890
+ // DEBUG MODE LOGS
1891
+ api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_1 ${overwriteAnswer.text}`, "Applied Answer Preprocessing");
1864
1892
  }
1865
1893
  else if (overwrittenBaseAnswer) {
1866
1894
  // if only overwrittenBaseAnswer was specified, copy the input and overwrite the text property only
1867
1895
  overwriteAnswer = JSON.parse(JSON.stringify(input));
1868
1896
  overwriteAnswer.text = overwrittenBaseAnswer;
1897
+ api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_2 ${overwriteAnswer.text}`, "Overwritten Base Answer");
1869
1898
  }
1870
1899
  // if we're in a subsequent execution or we want to parse results
1871
1900
  // immediately on entry, continue with evaluation
@@ -2006,6 +2035,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
2006
2035
  api.setLastExecutionMarker(nodeId, -1);
2007
2036
  api.resetExecutionAmount(nodeId);
2008
2037
  api.deleteSystemContext('activeQuestion');
2038
+ api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_3 '${target}'`, "Skipped");
2009
2039
  return;
2010
2040
  }
2011
2041
  }
@@ -2078,6 +2108,8 @@ DO NOT talk about other topics. Do not offer general assistance.`,
2078
2108
  api.resetExecutionAmount(nodeId);
2079
2109
  api.deleteSystemContext('activeQuestion');
2080
2110
  // #endregion 5.1.4 SetMarkersAndContinue
2111
+ // TODO: We need a toggle for this
2112
+ api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_4 ${typeof result === 'object' ? JSON.stringify(result) : result}`, "Found Result");
2081
2113
  }
2082
2114
  return;
2083
2115
  // #endregion 5.1 AnswerIsValid
@@ -2381,11 +2413,14 @@ DO NOT talk about other topics. Do not offer general assistance.`,
2381
2413
  // #region 5.2.3 Reprompt
2382
2414
  // check if there is an extra condition defined for reprompts and check whether it was truthy
2383
2415
  if (sayReprompt && repromptCondition) {
2384
- sayReprompt = !!api.parseCognigyScriptCondition(repromptCondition);
2416
+ const repromptConditionResult = !!api.parseCognigyScriptCondition(repromptCondition);
2417
+ !repromptConditionResult && api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_6`, "Skipping Reprompt Message");
2418
+ sayReprompt = repromptConditionResult;
2385
2419
  }
2386
2420
  // if we decided to skip the reprompt on intent and there
2387
2421
  // is an intent, don't say it
2388
2422
  if (skipRepromptOnIntent && cognigy.input.intent) {
2423
+ api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_5`, "Skipping Reprompt Message");
2389
2424
  sayReprompt = false;
2390
2425
  }
2391
2426
  // We will only output a reprompt if the user is not in the first execution
@@ -2435,12 +2470,22 @@ DO NOT talk about other topics. Do not offer general assistance.`,
2435
2470
  temperature: 0.7,
2436
2471
  timeoutInMs: 5000,
2437
2472
  useCase: "promptNode",
2473
+ stream: repromptLLMStream,
2474
+ streamStopTokens: repromptLLMStreamStopTokens,
2475
+ streamOnDataHandler: (text) => {
2476
+ text = text && text.trim();
2477
+ if (text) {
2478
+ api.output(text, null);
2479
+ }
2480
+ }
2438
2481
  };
2439
2482
  if (repromptLLMProvider && repromptLLMProvider !== "default") {
2440
2483
  data["llmProviderReferenceId"] = repromptLLMProvider;
2441
2484
  }
2442
2485
  const repromptMessage = await api.runGenerativeAIPrompt(data, "gptPromptNode");
2443
- await say_1.SAY.function({ cognigy, childConfigs: [], nodeId, organisationId, config: Object.assign({ handoverOutput, say: { type: "text", text: [repromptMessage] } }, rephraseWithAIParams) });
2486
+ if (!repromptLLMStream) {
2487
+ await say_1.SAY.function({ cognigy, childConfigs: [], nodeId, organisationId, config: Object.assign({ handoverOutput, say: { type: "text", text: [repromptMessage] } }, rephraseWithAIParams) });
2488
+ }
2444
2489
  break;
2445
2490
  default: // this is also "text"
2446
2491
  await say_1.SAY.function({ cognigy, childConfigs: [], nodeId, organisationId, config: Object.assign({ handoverOutput, say: { type: "text", text: [validationMessage] } }, rephraseWithAIParams) });
@@ -2,12 +2,14 @@
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.validateQuestionAnswer = void 0;
4
4
  const validateQuestionAnswer = (cognigy, config) => {
5
- const { additionalValidation, escalateIntentsAction, escalateIntentsThreshold, escalateIntentsValidIntents, } = config;
5
+ const { additionalValidation, escalateIntentsAction, escalateIntentsThreshold, escalateIntentsValidIntents } = config;
6
6
  const { input, api } = cognigy;
7
7
  let isValid = true;
8
8
  // check if there is an extra condition defined and check whether it was truthy
9
9
  if (additionalValidation) {
10
- isValid = !!api.parseCognigyScriptCondition(additionalValidation);
10
+ const additionalValidationResult = !!api.parseCognigyScriptCondition(additionalValidation);
11
+ !additionalValidationResult && api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_7`, "Invalid Answer");
12
+ isValid = additionalValidationResult;
11
13
  }
12
14
  if (escalateIntentsAction && escalateIntentsAction !== "none") {
13
15
  // Intent matched, intent score lte escalate threshold and intent is a
@@ -84,6 +84,7 @@ exports.CLEAN_TEXT = (0, createNodeDescriptor_1.createNodeDescriptor)({
84
84
  localeToUse = cleanTextLocale;
85
85
  }
86
86
  const result = new textCleaner_1.TextCleaner(localeToUse, additionalAllowedCharacters, additionalMappedSymbols, additionalSpecialPhrases, additionalPhoneticAlphabet).cleanAll(textToClean, options, (_b = input === null || input === void 0 ? void 0 : input.nlu) === null || _b === void 0 ? void 0 : _b.detailedSlots);
87
+ result !== textToClean && api.logDebugMessage(`UI__DEBUG_MODE__CLEAN_TEXT__MESSAGE ${textToClean}<br>UI__DEBUG_MODE__CLEAN_TEXT__MESSAGE_2 ${result}`);
87
88
  if (storeLocation === "context") {
88
89
  api.addToContext(contextKeyToStoreResult, result, "simple");
89
90
  }
@@ -211,7 +211,8 @@ exports.FUZZY_SEARCH = (0, createNodeDescriptor_1.createNodeDescriptor)({
211
211
  { type: "section", key: "storageOption" }
212
212
  ],
213
213
  function: async ({ cognigy, config }) => {
214
- const { api } = cognigy;
214
+ var _a, _b, _c, _d, _e, _f, _g;
215
+ const { api, input } = cognigy;
215
216
  const { searchPattern, items, distance, findAllMatches, ignoreLocation, includeMatches, includeScore, isCaseSensitive, location, minMatchCharLength, shouldSort, threshold, storeLocation, inputKey, contextKey } = config;
216
217
  if (!searchPattern)
217
218
  throw new Error("No search pattern provided.");
@@ -244,11 +245,31 @@ exports.FUZZY_SEARCH = (0, createNodeDescriptor_1.createNodeDescriptor)({
244
245
  }
245
246
  });
246
247
  }
248
+ if (result && result.length > 0 && ((_a = result === null || result === void 0 ? void 0 : result[0]) === null || _a === void 0 ? void 0 : _a.item) && (input.endpointType === "adminconsole" || api.getMetadata().isFollowSessionActive)) {
249
+ let debugMessage = `Found ${result.length} match${result.length > 1 ? 'es' : ''}.<br><br>`;
250
+ if (result.length > 3) {
251
+ debugMessage += `Top 3:<br>`;
252
+ }
253
+ if (result.length >= 1 && ((_b = result === null || result === void 0 ? void 0 : result[0]) === null || _b === void 0 ? void 0 : _b.item) && ((_c = result === null || result === void 0 ? void 0 : result[0]) === null || _c === void 0 ? void 0 : _c.score)) {
254
+ debugMessage += `1. ${result[0].item} (score: ${result[0].score})<br>`;
255
+ }
256
+ if (result.length >= 2 && ((_d = result === null || result === void 0 ? void 0 : result[1]) === null || _d === void 0 ? void 0 : _d.item) && ((_e = result === null || result === void 0 ? void 0 : result[1]) === null || _e === void 0 ? void 0 : _e.score)) {
257
+ debugMessage += `2. ${result[1].item} (score: ${result[1].score})<br>`;
258
+ }
259
+ if (result.length >= 3 && ((_f = result === null || result === void 0 ? void 0 : result[2]) === null || _f === void 0 ? void 0 : _f.item) && ((_g = result === null || result === void 0 ? void 0 : result[2]) === null || _g === void 0 ? void 0 : _g.score)) {
260
+ debugMessage += `3. ${result[2].item} (score: ${result[2].score})`;
261
+ }
262
+ api.logDebugMessage(debugMessage, "Result Found");
263
+ }
264
+ else {
265
+ api.logDebugMessage("UI__DEBUG_MODE__FUZZY_SEARCH__NO_RESULTS", "No Results");
266
+ }
247
267
  }
248
268
  else {
249
269
  result = {
250
270
  "error": `Source data file size limit (${getFuzzySearchMaxObjectSizeInBytes()} bytes) exceeded`
251
271
  };
272
+ api.logDebugError(`Source data file size limit (${getFuzzySearchMaxObjectSizeInBytes()} bytes) exceeded`, "Size Limit Exceeded");
252
273
  }
253
274
  if (storeLocation === "context") {
254
275
  api.addToContext(contextKey, result, "simple");
@@ -264,6 +285,7 @@ exports.FUZZY_SEARCH = (0, createNodeDescriptor_1.createNodeDescriptor)({
264
285
  else {
265
286
  api.addToInput(inputKey, error);
266
287
  }
288
+ api.logDebugError(error.message, "Error");
267
289
  }
268
290
  }
269
291
  });
@@ -135,7 +135,15 @@ exports.createLastUserInputString = createLastUserInputString;
135
135
  * @param cognigy the cognigy object (input, api, etc)
136
136
  */
137
137
  const writeLLMDebugLogs = async (label, prompt, response, debugLogTokenCount, debugLogRequestAndCompletion, cognigy) => {
138
- const { api } = cognigy;
138
+ var _a, _b, _c, _d;
139
+ const { api, input } = cognigy;
140
+ if (input.endpointType !== "adminconsole" && !api.getMetadata().isFollowSessionActive) {
141
+ // only return logs if in interaction panel or following session
142
+ return;
143
+ }
144
+ // stringify the response if it is an object
145
+ const responseOutput = typeof response === "object" ? JSON.stringify(response.result || response) : response;
146
+ const responseOutputFormatted = typeof response === "object" ? JSON.stringify(response.result || response, null, 4) : response;
139
147
  // debug logs are only processed for the interaction panel
140
148
  if (debugLogRequestAndCompletion) {
141
149
  try {
@@ -143,16 +151,15 @@ const writeLLMDebugLogs = async (label, prompt, response, debugLogTokenCount, de
143
151
  let completionTokenMessage = "";
144
152
  if (debugLogTokenCount) {
145
153
  if (prompt) {
146
- const requestTokens = await api.countGPTTokens(prompt);
154
+ const requestTokens = ((_a = response === null || response === void 0 ? void 0 : response.usage) === null || _a === void 0 ? void 0 : _a.prompt_tokens) || await api.countGPTTokens(prompt);
147
155
  requestTokenMessage = ` (${requestTokens} Tokens)`;
148
156
  }
149
157
  if (response) {
150
- const message = response.result || response;
151
- const completionTokens = await api.countGPTTokens(message);
158
+ const completionTokens = ((_b = response === null || response === void 0 ? void 0 : response.usage) === null || _b === void 0 ? void 0 : _b.completion_tokens) || await api.countGPTTokens(responseOutput);
152
159
  completionTokenMessage = ` (${completionTokens} Tokens)`;
153
160
  }
154
161
  }
155
- api.log('debug', `${label} - Request${requestTokenMessage}: '${prompt}' - Completion${completionTokenMessage}: '${response}'`);
162
+ api.logDebugMessage(`UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__REQUEST${requestTokenMessage}:<br>${prompt}<br><br>UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__COMPLETION${completionTokenMessage}:<br>${responseOutputFormatted}`, "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER");
156
163
  }
157
164
  catch (err) { }
158
165
  }
@@ -160,10 +167,13 @@ const writeLLMDebugLogs = async (label, prompt, response, debugLogTokenCount, de
160
167
  try {
161
168
  let requestTokens = 0;
162
169
  let completionTokens = 0;
163
- requestTokens = prompt && await api.countGPTTokens(prompt);
164
- completionTokens = response && await api.countGPTTokens(response);
165
- const completionMessage = response ? ` - Completion Tokens: ${completionTokens} - Total Tokens: ${requestTokens + completionTokens}` : "";
166
- api.log('debug', `${label} - Request Tokens: ${requestTokens} ${completionMessage}`);
170
+ requestTokens = ((_c = response.usage) === null || _c === void 0 ? void 0 : _c.prompt_tokens) || await api.countGPTTokens(prompt);
171
+ completionTokens = ((_d = response.usage) === null || _d === void 0 ? void 0 : _d.completion_tokens) || await api.countGPTTokens(responseOutput);
172
+ const requestTokenMessage = requestTokens || "unknown";
173
+ const completionTokenMessage = completionTokens || "unknown";
174
+ const totalTokens = (requestTokens + completionTokens) || "unknown";
175
+ const completionMessage = response ? `<br>UI__DEBUG_MODE__LLM_PROMPT__TOKEN_COUNT__COMPLETION_TOKENS: ${completionTokenMessage}<br>UI__DEBUG_MODE__LLM_PROMPT__TOKEN_COUNT__TOTAL_TOKENS: ${totalTokens}` : "";
176
+ api.logDebugMessage(`UI__DEBUG_MODE__LLM_PROMPT__TOKEN_COUNT__REQUEST_TOKENS: ${requestTokenMessage}${completionMessage}`, "UI__DEBUG_MODE__LLM_PROMPT__TOKEN_COUNT__HEADER");
167
177
  }
168
178
  catch (err) { }
169
179
  }