@cognigy/rest-api-client 0.18.0 → 0.20.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (216)
  1. package/CHANGELOG.md +6 -0
  2. package/build/RestAPIClient.js +7 -0
  3. package/build/apigroups/AdministrationAPIGroup_2_0.js +8 -1
  4. package/build/apigroups/JWTAuthAPIGroup_2_0.js +12 -0
  5. package/build/apigroups/MetricsAPIGroup_2_0.js +5 -0
  6. package/build/apigroups/ResourcesAPIGroup_2_0.js +28 -14
  7. package/build/apigroups/index.js +3 -1
  8. package/build/shared/charts/descriptors/agentAssist/helpers/determineMetadata.js +15 -0
  9. package/build/shared/charts/descriptors/agentAssist/helpers/knowledgeSearch/answerExtraction.helper.js +1 -1
  10. package/build/shared/charts/descriptors/agentAssist/helpers/knowledgeSearch/followUpDetection.helper.js +2 -2
  11. package/build/shared/charts/descriptors/agentAssist/identityAssist.js +1 -1
  12. package/build/shared/charts/descriptors/agentAssist/index.js +3 -1
  13. package/build/shared/charts/descriptors/agentAssist/knowledgeAssist.js +1 -1
  14. package/build/shared/charts/descriptors/agentAssist/nextActionAssist.js +4 -5
  15. package/build/shared/charts/descriptors/agentAssist/sendData.js +74 -0
  16. package/build/shared/charts/descriptors/agentAssist/sentimentAssist.js +1 -1
  17. package/build/shared/charts/descriptors/agentAssist/setAdaptiveCardTile.js +2 -0
  18. package/build/shared/charts/descriptors/agentAssist/setAgentAssistGrid.js +2 -1
  19. package/build/shared/charts/descriptors/agentAssist/setHtmlTile.js +5 -3
  20. package/build/shared/charts/descriptors/agentAssist/setIframeTile.js +5 -3
  21. package/build/shared/charts/descriptors/agentAssist/setSecureFormsTile.js +2 -2
  22. package/build/shared/charts/descriptors/agentAssist/transcriptAssist.js +2 -1
  23. package/build/shared/charts/descriptors/analytics/activateProfile.js +1 -0
  24. package/build/shared/charts/descriptors/analytics/addMemory.js +51 -0
  25. package/build/shared/charts/descriptors/analytics/blindMode.js +2 -0
  26. package/build/shared/charts/descriptors/analytics/completeGoal.js +4 -2
  27. package/build/shared/charts/descriptors/analytics/deactivateProfile.js +1 -0
  28. package/build/shared/charts/descriptors/analytics/deleteProfile.js +1 -0
  29. package/build/shared/charts/descriptors/analytics/helper.js +20 -0
  30. package/build/shared/charts/descriptors/analytics/index.js +5 -1
  31. package/build/shared/charts/descriptors/analytics/mergeProfile.js +1 -0
  32. package/build/shared/charts/descriptors/analytics/overwriteAnalytics.js +9 -0
  33. package/build/shared/charts/descriptors/analytics/setRating.js +4 -2
  34. package/build/shared/charts/descriptors/analytics/trackGoal.js +102 -0
  35. package/build/shared/charts/descriptors/analytics/updateProfile.js +1 -0
  36. package/build/shared/charts/descriptors/apps/initAppSession.js +1 -0
  37. package/build/shared/charts/descriptors/apps/setAdaptiveCardAppState.js +35 -10
  38. package/build/shared/charts/descriptors/apps/setHtmlAppState.js +25 -2
  39. package/build/shared/charts/descriptors/apps/utils/getXAppsOverlaySettings.js +54 -0
  40. package/build/shared/charts/descriptors/connectionNodes/documentParserProviders/azureAIDocumentIntelligenceConnection.js +12 -0
  41. package/build/shared/charts/descriptors/connectionNodes/documentParserProviders/index.js +13 -0
  42. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/awsBedrockProviderConnection.js +12 -0
  43. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/azureOpenAIProviderConnection.js +4 -3
  44. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/azureOpenAIProviderConnectionV2.js +3 -3
  45. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/azureOpenAIProviderOauth2Connection.js +14 -0
  46. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/index.js +16 -8
  47. package/build/shared/charts/descriptors/connectionNodes/generativeAIProviders/openAIProviderConnection.js +3 -3
  48. package/build/shared/charts/descriptors/data/addToContext.js +7 -0
  49. package/build/shared/charts/descriptors/data/copyDataToContext.js +3 -0
  50. package/build/shared/charts/descriptors/data/copySlotsToContext.js +4 -1
  51. package/build/shared/charts/descriptors/data/debugMessage.js +73 -0
  52. package/build/shared/charts/descriptors/data/index.js +3 -1
  53. package/build/shared/charts/descriptors/data/removeFromContext.js +9 -1
  54. package/build/shared/charts/descriptors/data/resetContext.js +1 -0
  55. package/build/shared/charts/descriptors/index.js +19 -1
  56. package/build/shared/charts/descriptors/knowledgeSearch/knowledgeSearchV2.js +1 -1
  57. package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +55 -20
  58. package/build/shared/charts/descriptors/logic/disableSlotFillers.js +1 -1
  59. package/build/shared/charts/descriptors/logic/enableSlotFillers.js +1 -1
  60. package/build/shared/charts/descriptors/logic/resetState.js +1 -0
  61. package/build/shared/charts/descriptors/logic/setState.js +2 -1
  62. package/build/shared/charts/descriptors/logic/setTranslation.js +3 -1
  63. package/build/shared/charts/descriptors/logic/switchLocale.js +1 -0
  64. package/build/shared/charts/descriptors/logic/think.js +3 -1
  65. package/build/shared/charts/descriptors/logic/thinkV2.js +113 -4
  66. package/build/shared/charts/descriptors/message/question/question.js +50 -5
  67. package/build/shared/charts/descriptors/message/question/utils/validateQuestionAnswer.js +4 -2
  68. package/build/shared/charts/descriptors/nlu/cleanText.js +2 -1
  69. package/build/shared/charts/descriptors/nlu/executeCognigyNLU.js +1 -1
  70. package/build/shared/charts/descriptors/nlu/fuzzySearch.js +24 -2
  71. package/build/shared/charts/descriptors/nlu/generativeSlotFiller/generativeSlotFiller.js +1 -1
  72. package/build/shared/charts/descriptors/nlu/generativeSlotFiller/generativeSlotFillerFallback.js +1 -1
  73. package/build/shared/charts/descriptors/nlu/generativeSlotFiller/generativeSlotFillerSuccess.js +1 -1
  74. package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +18 -9
  75. package/build/shared/charts/descriptors/nlu/matchPattern.js +1 -1
  76. package/build/shared/charts/descriptors/nlu/regexSlotFiller.js +1 -1
  77. package/build/shared/charts/descriptors/service/GPTConversation.js +1 -1
  78. package/build/shared/charts/descriptors/service/GPTPrompt.js +70 -41
  79. package/build/shared/charts/descriptors/service/LLMEntityExtract.js +12 -3
  80. package/build/shared/charts/descriptors/service/aiAgent/aiAgentHandover.js +92 -0
  81. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +1146 -0
  82. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobDefault.js +31 -0
  83. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobTool.js +139 -0
  84. package/build/shared/charts/descriptors/service/aiAgent/aiAgentToolAnswer.js +120 -0
  85. package/build/shared/charts/descriptors/service/aiAgent/helper.js +222 -0
  86. package/build/shared/charts/descriptors/service/handoverV2.js +140 -2
  87. package/build/shared/charts/descriptors/service/httpRequest.js +35 -2
  88. package/build/shared/charts/descriptors/service/index.js +11 -1
  89. package/build/shared/charts/descriptors/transcripts/addTranscriptStep.js +413 -0
  90. package/build/shared/charts/descriptors/transcripts/getTranscript.js +104 -0
  91. package/build/shared/charts/descriptors/transcripts/index.js +8 -0
  92. package/build/shared/charts/descriptors/voice/mappers/base.mapper.js +20 -0
  93. package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +227 -72
  94. package/build/shared/charts/descriptors/voice/mappers/transfer.mapper.js +6 -9
  95. package/build/shared/charts/descriptors/voice/nodes/bargeIn.js +2 -0
  96. package/build/shared/charts/descriptors/voice/nodes/continuousAsr.js +5 -4
  97. package/build/shared/charts/descriptors/voice/nodes/dtmf.js +2 -0
  98. package/build/shared/charts/descriptors/voice/nodes/muteSpeechInput.js +1 -0
  99. package/build/shared/charts/descriptors/voice/nodes/noUserInput.js +2 -0
  100. package/build/shared/charts/descriptors/voice/nodes/sessionSpeechParameters.js +2 -0
  101. package/build/shared/charts/descriptors/voice/nodes/transfer.js +2 -0
  102. package/build/shared/charts/descriptors/voicegateway/nodes/callRecording.js +8 -1
  103. package/build/shared/charts/descriptors/voicegateway/nodes/handover.js +4 -2
  104. package/build/shared/charts/descriptors/voicegateway/nodes/hangup.js +4 -2
  105. package/build/shared/charts/descriptors/voicegateway/nodes/helper/utils.js +14 -0
  106. package/build/shared/charts/descriptors/voicegateway/nodes/playURL.js +4 -5
  107. package/build/shared/charts/descriptors/voicegateway/nodes/sendMessage.js +8 -1
  108. package/build/shared/charts/descriptors/voicegateway/nodes/sendMetaData.js +7 -3
  109. package/build/shared/charts/descriptors/voicegateway/nodes/setSessionParams.js +8 -1
  110. package/build/shared/charts/descriptors/voicegateway2/nodes/dtmf.js +2 -0
  111. package/build/shared/charts/descriptors/voicegateway2/nodes/hangup.js +2 -0
  112. package/build/shared/charts/descriptors/voicegateway2/nodes/muteSpeechInput.js +3 -0
  113. package/build/shared/charts/descriptors/voicegateway2/nodes/play.js +1 -0
  114. package/build/shared/charts/descriptors/voicegateway2/nodes/record.js +1 -0
  115. package/build/shared/charts/descriptors/voicegateway2/nodes/refer.js +1 -0
  116. package/build/shared/charts/descriptors/voicegateway2/nodes/sendMetadata.js +1 -0
  117. package/build/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +311 -10
  118. package/build/shared/charts/descriptors/voicegateway2/nodes/transfer.js +25 -9
  119. package/build/shared/charts/descriptors/voicegateway2/utils/helper.js +2 -2
  120. package/build/shared/charts/helpers/generativeAI/rephraseSentenceWithAi.js +4 -2
  121. package/build/shared/constants.js +16 -1
  122. package/build/shared/handoverClients/interfaces/THandoverEventType.js +2 -0
  123. package/build/shared/helper/logFullConfigToDebugMode.js +30 -0
  124. package/build/shared/helper/nlu/textCleaner.js +4 -2
  125. package/build/shared/interfaces/IEndpointSettings.js +3 -0
  126. package/build/shared/interfaces/IOrganisation.js +1 -0
  127. package/build/shared/interfaces/IProfile.js +2 -0
  128. package/build/shared/interfaces/IProfileSchema.js +4 -0
  129. package/build/shared/interfaces/analytics/IAnalyticsSourceData.js +22 -20
  130. package/build/shared/interfaces/{restAPI/resources/milestone/v2.0/IMilestone_2_0.js → analytics/IGoalAnalytics.js} +1 -1
  131. package/build/shared/interfaces/appsession/ISetAppStateOptions.js +3 -0
  132. package/build/shared/interfaces/appsession/ISetAppStateOverlaySettings.js +3 -0
  133. package/build/shared/interfaces/appsession/ISetAppStateOverlaySettingsMetaData.js +3 -0
  134. package/build/shared/interfaces/debugEvents/TDebugEventMessagePayload.js +3 -0
  135. package/build/shared/interfaces/debugEvents/TDebugEventType.js +2 -0
  136. package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +51 -70
  137. package/build/shared/interfaces/{restAPI/resources/journey/v2.0/IJourney_2_0.js → generativeAI/tools.js} +1 -1
  138. package/build/shared/interfaces/handover.js +45 -2
  139. package/build/shared/interfaces/license.js +3 -2
  140. package/build/shared/interfaces/messageAPI/endpoints.js +8 -1
  141. package/build/shared/interfaces/messageAPI/handover.js +22 -1
  142. package/build/shared/interfaces/resources/IAiAgent.js +52 -0
  143. package/build/shared/interfaces/resources/IAuditEvent.js +2 -1
  144. package/build/shared/interfaces/resources/IConnection.js +1 -0
  145. package/build/shared/interfaces/resources/IEndpoint.js +2 -2
  146. package/build/shared/interfaces/resources/{IMilestone.js → IGoal.js} +15 -15
  147. package/build/shared/interfaces/resources/ILargeLanguageModel.js +52 -4
  148. package/build/shared/interfaces/resources/INodeDescriptorSet.js +15 -0
  149. package/build/shared/interfaces/resources/TResourceType.js +16 -8
  150. package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeSource.js +1 -1
  151. package/build/shared/interfaces/resources/settings/IAgentSettings.js +12 -7
  152. package/build/shared/interfaces/resources/settings/IGenerativeAISettings.js +9 -1
  153. package/build/shared/interfaces/resources/settings/IKnowledgeAISettings.js +18 -0
  154. package/build/shared/interfaces/resources/settings/index.js +4 -1
  155. package/build/shared/interfaces/restAPI/administration/organisations/v2.0/IReadCollectionsToBeDeletedRest_2_0.js +0 -1
  156. package/build/shared/interfaces/restAPI/administration/organisations/v2.0/IReadOrganisationKnowledgeChunksCountRest_2_0.js +3 -0
  157. package/build/shared/interfaces/restAPI/administration/user/v2.0/IGetPinnedResources_2_0.js +3 -0
  158. package/build/shared/interfaces/restAPI/{resources/journey/v2.0/IReadJourneyRest_2_0.js → administration/user/v2.0/IPinResourceRest_2_0.js} +1 -1
  159. package/build/shared/interfaces/restAPI/operations/nlu/v2.0/IGenerateNluScoresRest_2_0.js +57 -0
  160. package/build/shared/interfaces/restAPI/resources/aiAgent/v2.0/IAiAgentHiringTemplate_2_0.js +32 -0
  161. package/build/shared/interfaces/restAPI/resources/aiAgent/v2.0/IAiAgent_2_0.js +3 -0
  162. package/build/shared/interfaces/restAPI/resources/{journey/v2.0/IIndexJourneysRest_2_0.js → aiAgent/v2.0/ICreateAiAgentRest_2_0.js} +1 -1
  163. package/build/shared/interfaces/restAPI/resources/aiAgent/v2.0/IDeleteAiAgentRest_2_0.js +3 -0
  164. package/build/shared/interfaces/restAPI/resources/aiAgent/v2.0/IGetAiAgentHiringTemplates_2_0.js +3 -0
  165. package/build/shared/interfaces/restAPI/resources/{journey/v2.0/IJourneyStep_2_0.js → aiAgent/v2.0/IHireAiAgent_2_0.js} +1 -1
  166. package/build/shared/interfaces/restAPI/resources/{journey/v2.0/IJourneyIndexItem_2_0.js → aiAgent/v2.0/IIndexAiAgentRest_2_0.js} +1 -1
  167. package/build/shared/interfaces/restAPI/resources/{journey/v2.0/IJourneyProgress_2_0.js → aiAgent/v2.0/IReadAiAgentRest_2_0.js} +1 -1
  168. package/build/shared/interfaces/restAPI/resources/aiAgent/v2.0/IUpdateAiAgentRest_2_0.js +3 -0
  169. package/build/shared/interfaces/restAPI/resources/aiAgent/v2.0/IValidateAiAgentNameRest_2_0.js +3 -0
  170. package/build/shared/interfaces/restAPI/resources/goal/v2.0/ICloneGoalRest_2_0.js +3 -0
  171. package/build/shared/interfaces/restAPI/resources/goal/v2.0/ICreateGoalRest_2_0.js +3 -0
  172. package/build/shared/interfaces/restAPI/resources/goal/v2.0/IDeleteGoalRest_2_0.js +3 -0
  173. package/build/shared/interfaces/restAPI/resources/goal/v2.0/IGoalIndexItem_2_0.js +3 -0
  174. package/build/shared/interfaces/restAPI/resources/goal/v2.0/IGoalStepMetric_2_0.js +3 -0
  175. package/build/shared/interfaces/restAPI/resources/goal/v2.0/IGoalStep_2_0.js +3 -0
  176. package/build/shared/interfaces/restAPI/resources/goal/v2.0/IGoal_2_0.js +3 -0
  177. package/build/shared/interfaces/restAPI/resources/goal/v2.0/IIndexGoalsRest_2_0.js +3 -0
  178. package/build/shared/interfaces/restAPI/resources/goal/v2.0/IReadGoalRest_2_0.js +3 -0
  179. package/build/shared/interfaces/restAPI/resources/goal/v2.0/IUpdateGoalRest_2_0.js +3 -0
  180. package/build/shared/interfaces/restAPI/resources/goal/v2.0/index.js +3 -0
  181. package/build/shared/interfaces/restAPI/resources/largeLanguageModel/v2.0/IAvailableModelsForLLMProvider_2_0 .js +18 -0
  182. package/build/shared/interfaces/restAPI/resources/largeLanguageModel/v2.0/IGetAvailableModelsForLLMRest_2_0 .js +3 -0
  183. package/build/shared/interfaces/security/IPermission.js +6 -2
  184. package/build/shared/interfaces/security/IPinnedResource.js +3 -0
  185. package/build/shared/interfaces/security/IRole.js +2 -0
  186. package/build/shared/interfaces/security/ISystemCapabilities.js +3 -0
  187. package/build/shared/interfaces/security/index.js +1 -1
  188. package/build/shared/interfaces/trainer/ITrainerRecord.js +2 -2
  189. package/build/shared/interfaces/transcripts/transcripts.js +33 -0
  190. package/build/shared/interfaces/user.js +1 -1
  191. package/package.json +2 -2
  192. package/types/index.d.ts +2332 -1041
  193. package/build/shared/interfaces/journeys/IJourney.js +0 -83
  194. package/build/shared/interfaces/journeys/IJourneyProgress.js +0 -40
  195. package/build/shared/interfaces/journeys/IJourneyTrackEvent.js +0 -35
  196. package/build/shared/interfaces/journeys/index.js +0 -14
  197. package/build/shared/interfaces/license.js.map +0 -1
  198. package/build/shared/interfaces/restAPI/resources/journey/v2.0/IJourneyTrackEvents_2_0.js +0 -3
  199. package/build/shared/interfaces/restAPI/resources/journey/v2.0/IReadJourneyProgressRest_2_0.js +0 -3
  200. package/build/shared/interfaces/restAPI/resources/journey/v2.0/ITrackJourneyEventRest_2_0.js +0 -3
  201. package/build/shared/interfaces/restAPI/resources/journey/v2.0/IUpdateSelectedJourneyRest_2_0.js +0 -3
  202. package/build/shared/interfaces/restAPI/resources/knowledgeSearchIndex/v2.0/ICreateKnowledgeSearchIndexRest_2_0.js +0 -3
  203. package/build/shared/interfaces/restAPI/resources/knowledgeSearchIndex/v2.0/IDeleteKnowledgeSearchIndexRest_2_0.js +0 -3
  204. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/ICloneMilestoneRest_2_0.js +0 -3
  205. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/ICreateMilestoneRest_2_0.js +0 -3
  206. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IDeleteMilestoneRest_2_0.js +0 -3
  207. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IIndexMilestonesRest_2_0.js +0 -3
  208. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IMilestoneIndexItem_2_0.js +0 -3
  209. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IMilestoneStepMetric_2_0.js +0 -3
  210. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IMilestoneStep_2_0.js +0 -3
  211. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IReadMilestoneRest_2_0.js +0 -3
  212. package/build/shared/interfaces/restAPI/resources/milestone/v2.0/IUpdateMilestoneRest_2_0.js +0 -3
  213. package/build/test.js +0 -27
  214. /package/build/shared/interfaces/restAPI/{resources/journey/v2.0 → operations}/index.js +0 -0
  215. /package/build/shared/interfaces/restAPI/{resources/knowledgeSearchIndex → operations/nlu}/v2.0/index.js +0 -0
  216. /package/build/shared/interfaces/restAPI/resources/{milestone → aiAgent}/v2.0/index.js +0 -0

package/build/shared/charts/descriptors/logic/thinkV2.js +113 -4

@@ -3,6 +3,9 @@ Object.defineProperty(exports, "__esModule", { value: true });
  exports.THINK_V2 = void 0;
  /* Custom modules */
  const createNodeDescriptor_1 = require("../../createNodeDescriptor");
+ const logic_1 = require("../logic");
+ const logFullConfigToDebugMode_1 = require("../../../helper/logFullConfigToDebugMode");
+ const errors_1 = require("../../../errors");
  /**
  * Node name: 'think'
  *
@@ -68,6 +71,56 @@ exports.THINK_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
  value: "intent"
  }
  },
+ {
+ key: "logErrorToSystem",
+ label: "UI__NODE_EDITOR__THINK_V2__LOG_ERROR_TO_SYSTEM__LABEL",
+ description: "UI__NODE_EDITOR__THINK_V2__LOG_ERROR_TO_SYSTEM__DESCRIPTION",
+ type: "toggle",
+ defaultValue: false,
+ },
+ {
+ key: "errorHandling",
+ type: "select",
+ label: "UI__NODE_EDITOR__THINK_V2__HANDLE_ERROR__LABEL",
+ description: "UI__NODE_EDITOR__THINK_V2__HANDLE_ERROR__DESCRIPTION",
+ defaultValue: "continue",
+ params: {
+ options: [
+ {
+ label: "UI__NODE_EDITOR__THINK_V2__HANDLE_ERROR__OPTIONS__STOP__LABEL",
+ value: "stop"
+ },
+ {
+ label: "UI__NODE_EDITOR__THINK_V2__HANDLE_ERROR__OPTIONS__CONTINUE__LABEL",
+ value: "continue"
+ },
+ {
+ label: "UI__NODE_EDITOR__THINK_V2__HANDLE_ERROR__ERROR__OPTIONS__GOTO__LABEL",
+ value: "goto"
+ },
+ ]
+ }
+ },
+ {
+ key: "errorMessage",
+ label: "UI__NODE_EDITOR__THINK_V2__ERROR_MESSAGE__LABEL",
+ type: "cognigyText",
+ description: "UI__NODE_EDITOR__THINK_V2__ERROR_MESSAGE__DESCRIPTION",
+ defaultValue: "",
+ condition: {
+ key: "errorHandling",
+ value: "continue"
+ }
+ },
+ {
+ key: "errorHandlingGotoTarget",
+ type: "flowNode",
+ label: "UI__NODE_EDITOR__THINK_V2__ERROR__GOTO_NODE__LABEL",
+ condition: {
+ key: "errorHandling",
+ value: "goto"
+ }
+ },
  ],
  sections: [
  {
@@ -78,22 +131,78 @@ exports.THINK_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
  "thinkType",
  "data"
  ]
+ },
+ {
+ key: "errors",
+ label: "UI__NODE_EDITOR__THINK_V2__SECTIONS__ERRORS__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "logErrorToSystem",
+ "errorHandling",
+ "errorMessage",
+ "errorHandlingGotoTarget",
+ ]
  }
  ],
  form: [
  { type: "field", key: "text" },
  { type: "field", key: "intent" },
  { type: "section", key: "advanced" },
+ { type: "section", key: "errors" },
  ],
  preview: {
  key: "text",
  type: "text",
  },
  tags: ["basic", "logic", "recursion", "inject"],
- function: async ({ cognigy, config }) => {
- const { text, data, intent, thinkType, } = config;
- const { api } = cognigy;
- if (thinkType === "intent") {
+ function: async ({ cognigy, config, nodeId }) => {
+ const { text, data, intent, thinkType, errorHandling = "continue", errorHandlingGotoTarget, errorMessage, logErrorToSystem, } = config;
+ const { api, input } = cognigy;
+ const { traceId } = input;
+ (0, logFullConfigToDebugMode_1.logFullConfigToDebugMode)(cognigy, config);
+ if ((thinkType === "default" && typeof text !== "string")
+ || (thinkType === "intent" && typeof intent !== "string")) {
+ const errorDetails = {
+ message: "Think input should be string",
+ originalInput: JSON.stringify(text || intent)
+ };
+ if (logErrorToSystem) {
+ api.log("error", JSON.stringify(errorDetails));
+ }
+ api.logDebugError(errorDetails, "UI__DEBUG_MODE__THINK_NODE__ERROR");
+ // handle error depending on the settings
+ if (errorHandling === "continue") {
+ // output the provided error message
+ if (errorMessage) {
+ api.output(errorMessage, null);
+ }
+ }
+ else if (errorHandling === "goto") {
+ if (!errorHandlingGotoTarget) {
+ throw new Error("GoTo Target is required");
+ }
+ const gotoParams = {
+ cognigy,
+ childConfigs: [],
+ nodeId,
+ config: {
+ flowNode: {
+ flow: errorHandlingGotoTarget.flow,
+ node: errorHandlingGotoTarget.node,
+ },
+ injectedText: undefined,
+ injectedData: undefined,
+ executionMode: "continue",
+ absorbContext: false
+ }
+ };
+ await logic_1.GO_TO.function(gotoParams);
+ }
+ else {
+ throw new errors_1.InternalServerError(errorDetails.message, { traceId });
+ }
+ }
+ else if (thinkType === "intent") {
  api.thinkV2(`cIntent:${intent}`, null);
  }
  else if (text || data) {
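
For orientation, a minimal sketch of a Think v2 node configuration that exercises the new error-handling fields. The field keys and option values are taken from the descriptor above; the flow and node IDs are placeholders, not values from the package.

```js
// Hypothetical Think v2 config using the new 0.20.0 error-handling fields.
// Keys and option values come from the descriptor above; IDs are made up.
const thinkNodeConfig = {
    thinkType: "default",
    text: "hello",
    logErrorToSystem: true,        // also write the error to the system log
    errorHandling: "goto",         // "stop" | "continue" | "goto"
    errorMessage: "",              // only output when errorHandling is "continue"
    errorHandlingGotoTarget: {     // only used when errorHandling is "goto"
        flow: "error-flow-id",
        node: "error-node-id",
    },
};

console.log(JSON.stringify(thinkNodeConfig, null, 2));
```

Per the function body above, "continue" is the default, "goto" forwards to the configured target via GO_TO, and "stop" raises an InternalServerError.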

package/build/shared/charts/descriptors/message/question/question.js +50 -5

@@ -324,6 +324,16 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  value: "llm",
  }
  },
+ {
+ key: "repromptLLMStream",
+ label: "UI__NODE_EDITOR__SERVICE__QUESTION__QUESTION__FIELDS__STREAM_REPROMPT__LABEL",
+ type: "toggle",
+ defaultValue: false,
+ condition: {
+ key: "repromptType",
+ value: "llm",
+ }
+ },
  {
  key: "repromptLLMTurns",
  label: "UI__NODE_EDITOR__SERVICE__QUESTION__QUESTION__FIELDS__TRANSCRIPT_STEPS__LABEL",
@@ -340,6 +350,17 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  value: "llm",
  }
  },
+ {
+ key: "repromptLLMStreamStopTokens",
+ type: "textArray",
+ label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STOP_TOKENS__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__STREAM_STOP_TOKENS__DESCRIPTION",
+ defaultValue: [".", "!", "?", "\\n"],
+ condition: {
+ key: "repromptLLMStream",
+ value: true,
+ }
+ },
  {
  key: "repromptFlowNode",
  type: "flowNode",
@@ -452,6 +473,9 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  type: "cognigyText",
  label: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__CONTEXT_KEY__LABEL",
  defaultValue: "result",
+ params: {
+ noTranslate: true
+ },
  condition: {
  key: "storeResultInContext",
  value: true
@@ -1579,7 +1603,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  },
  ],
  },
- },
+ }
  ].filter(field => !!field),
  sections: [
  {
@@ -1603,6 +1627,8 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  "repromptLLMProvider",
  "repromptLLMPrompt",
  "repromptLLMTurns",
+ "repromptLLMStream",
+ "repromptLLMStreamStopTokens",
  "repromptSay",
  "repromptFlowNode",
  "repromptParseIntents",
@@ -1771,7 +1797,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  "key": "type",
  "value": "date"
  }),
- (0, getRephraseWithAIFields_1.getRephraseWithAISection)(),
+ (0, getRephraseWithAIFields_1.getRephraseWithAISection)()
  ].filter(section => !!section),
  form: [
  { type: "field", key: "type" },
@@ -1806,7 +1832,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  //#endregion DescriptorFields
  function: async ({ cognigy, nodeId, organisationId, config, inputOptions }) => {
  var _a, _b, _c;
- const { say, type, validationMessage, repromptLLMProvider, repromptType, repromptLLMPrompt, repromptLLMTurns, repromptSay, repromptFlowNode, repromptParseIntents, repromptParseKeyphrases, repromptAbsorbContext, validationRepeat, storeResultInContext, contextKey, storeInContactProfile, profileKey, storeDetailedResults, parseResultOnEntry, repromptCondition, maxExecutionDiff, resultLocation, skipRepromptOnIntent, onlyAcceptEscalationIntents, escalateAnswersAction, escalateAnswersThreshold, escalateAnswersGotoTarget, escalateAnswersExecuteTarget, escalateAnswersGotoExecutionMode, escalateAnswersInjectedText, escalateAnswersInjectedData, escalateAnswersMessage, escalateAnswersRepromptPrevention, escalateAnswersOnce, escalateAnswersHandoverText, escalateAnswersRepeatHandoverMessage, escalateAnswersHandoverCancelIntent, escalateAnswersHandoverQuickReply, escalateAnswersHandoverChatwootInboxId, escalateAnswersHandoverLiveAgentInboxId, escalateAnswersHandoverAdditionalCategoryIds, escalateAnswersHandoverSendTranscriptAsFirstMessage, escalateAnswersHandoverSalesforcePrechatEntities, escalateAnswersHandoverSalesforcePrechatDetails, escalateAnswersHandoverGenesysLanguage, escalateAnswersHandoverGenesysSkills, escalateAnswersHandoverGenesysPriority, escalateAnswersHandoverGenesysCustomAttributes, escalateAnswersHandoverEightByEightChannelId, escalateAnswersHandoverEightByEightQueueId, escalateAnswersHandoverEightByEightJSONProps, escalateAnswersHandoverSendResolveEvent, escalateAnswersHandoverResolveBehavior, escalateAnswersAgentAssistInitMessage, escalateAnswersAllowAgentInject, escalateAnswersSendOnActiveEvent, escalateAnswersSendOnQueueEvent, escalateIntentsAction, escalateIntentsValidIntents, escalateIntentsThreshold, escalateIntentsGotoTarget, escalateIntentsExecuteTarget, escalateIntentsGotoExecutionMode, escalateIntentsInjectedText, escalateIntentsInjectedData, escalateIntentsMessage, escalateIntentsHandoverText, escalateIntentsRepeatHandoverMessage, escalateIntentsHandoverCancelIntent, escalateIntentsHandoverQuickReply, escalateIntentsHandoverChatwootInboxId, escalateIntentsHandoverLiveAgentInboxId, escalateIntentsHandoverAdditionalCategoryIds, escalateIntentHandoverSendTranscriptAsFirstMessage, escalateIntentsHandoverSalesforcePrechatEntities, escalateIntentsHandoverSalesforcePrechatDetails, escalateIntentsHandoverGenesysLanguage, escalateIntentsHandoverGenesysSkills, escalateIntentsHandoverGenesysPriority, escalateIntentsHandoverGenesysCustomAttributes, escalateIntentsHandoverEightByEightChannelId, escalateIntentsHandoverEightByEightQueueId, escalateIntentsHandoverEightByEightJSONProps, escalateIntentsRepromptPrevention, escalateIntentsHandoverSendResolveEvent, escalateIntentsHandoverResolveBehavior, escalateIntentsAgentAssistInitMessage, escalateIntentsAllowAgentInject, escalateIntentsSendOnActiveEvent, escalateIntentsSendOnQueueEvent, reconfirmationBehaviour, reconfirmationQuestion, reconfirmationQuestionReprompt, handoverOutput, cleanTextLocale, cleanDisallowedSymbols, additionalAllowedCharacters, additionalSpecialPhrases, resolveSpelledOutNumbers, resolvePhoneticAlphabet, additionalPhoneticAlphabet, replaceSpecialWords, additionalMappedSymbols, resolveSpelledOutAlphabet, resolvePhoneticCounters, contractSingleCharacters, contractNumberGroups, trimResult, runNLUAfterCleaning, overwrittenBaseAnswer } = config;
+ const { say, type, validationMessage, repromptLLMProvider, repromptType = "text", repromptLLMPrompt, repromptLLMTurns, repromptLLMStream, repromptLLMStreamStopTokens, repromptSay, repromptFlowNode, repromptParseIntents, repromptParseKeyphrases, repromptAbsorbContext, validationRepeat, storeResultInContext, contextKey, storeInContactProfile, profileKey, storeDetailedResults, parseResultOnEntry, repromptCondition, maxExecutionDiff, resultLocation, skipRepromptOnIntent, onlyAcceptEscalationIntents, escalateAnswersAction, escalateAnswersThreshold, escalateAnswersGotoTarget, escalateAnswersExecuteTarget, escalateAnswersGotoExecutionMode, escalateAnswersInjectedText, escalateAnswersInjectedData, escalateAnswersMessage, escalateAnswersRepromptPrevention, escalateAnswersOnce, escalateAnswersHandoverText, escalateAnswersRepeatHandoverMessage, escalateAnswersHandoverCancelIntent, escalateAnswersHandoverQuickReply, escalateAnswersHandoverChatwootInboxId, escalateAnswersHandoverLiveAgentInboxId, escalateAnswersHandoverAdditionalCategoryIds, escalateAnswersHandoverSendTranscriptAsFirstMessage, escalateAnswersHandoverSalesforcePrechatEntities, escalateAnswersHandoverSalesforcePrechatDetails, escalateAnswersHandoverGenesysLanguage, escalateAnswersHandoverGenesysSkills, escalateAnswersHandoverGenesysPriority, escalateAnswersHandoverGenesysCustomAttributes, escalateAnswersHandoverEightByEightChannelId, escalateAnswersHandoverEightByEightQueueId, escalateAnswersHandoverEightByEightJSONProps, escalateAnswersHandoverSendResolveEvent, escalateAnswersHandoverResolveBehavior, escalateAnswersAgentAssistInitMessage, escalateAnswersAllowAgentInject, escalateAnswersSendOnActiveEvent, escalateAnswersSendOnQueueEvent, escalateIntentsAction, escalateIntentsValidIntents, escalateIntentsThreshold, escalateIntentsGotoTarget, escalateIntentsExecuteTarget, escalateIntentsGotoExecutionMode, escalateIntentsInjectedText, escalateIntentsInjectedData, escalateIntentsMessage, escalateIntentsHandoverText, escalateIntentsRepeatHandoverMessage, escalateIntentsHandoverCancelIntent, escalateIntentsHandoverQuickReply, escalateIntentsHandoverChatwootInboxId, escalateIntentsHandoverLiveAgentInboxId, escalateIntentsHandoverAdditionalCategoryIds, escalateIntentHandoverSendTranscriptAsFirstMessage, escalateIntentsHandoverSalesforcePrechatEntities, escalateIntentsHandoverSalesforcePrechatDetails, escalateIntentsHandoverGenesysLanguage, escalateIntentsHandoverGenesysSkills, escalateIntentsHandoverGenesysPriority, escalateIntentsHandoverGenesysCustomAttributes, escalateIntentsHandoverEightByEightChannelId, escalateIntentsHandoverEightByEightQueueId, escalateIntentsHandoverEightByEightJSONProps, escalateIntentsRepromptPrevention, escalateIntentsHandoverSendResolveEvent, escalateIntentsHandoverResolveBehavior, escalateIntentsAgentAssistInitMessage, escalateIntentsAllowAgentInject, escalateIntentsSendOnActiveEvent, escalateIntentsSendOnQueueEvent, reconfirmationBehaviour, reconfirmationQuestion, reconfirmationQuestionReprompt, handoverOutput, cleanTextLocale, cleanDisallowedSymbols, additionalAllowedCharacters, additionalSpecialPhrases, resolveSpelledOutNumbers, resolvePhoneticAlphabet, additionalPhoneticAlphabet, replaceSpecialWords, additionalMappedSymbols, resolveSpelledOutAlphabet, resolvePhoneticCounters, contractSingleCharacters, contractNumberGroups, trimResult, runNLUAfterCleaning, overwrittenBaseAnswer } = config;
  const { input, context, profile, api } = cognigy;
  const rephraseWithAIParams = {
  generativeAI_rephraseOutputMode: config.generativeAI_rephraseOutputMode,
@@ -1861,11 +1887,14 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  overwriteAnswer = await api.executeCognigyNLU(overwriteAnswer.text, input.data, input.inputId, { parseIntents: true, parseSlots: true, parseSystemSlots: true, findType: true });
  overwriteAnswer.text = cleanedAnswer;
  }
+ // DEBUG MODE LOGS
+ api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_1 ${overwriteAnswer.text}`, "Applied Answer Preprocessing");
  }
  else if (overwrittenBaseAnswer) {
  // if only overwrittenBaseAnswer was specified, copy the input and overwrite the text property only
  overwriteAnswer = JSON.parse(JSON.stringify(input));
  overwriteAnswer.text = overwrittenBaseAnswer;
+ api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_2 ${overwriteAnswer.text}`, "Overwritten Base Answer");
  }
  // if we're in a subsequent execution or we want to parse results
  // immediately on entry, continue with evaluation
@@ -2006,6 +2035,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  api.setLastExecutionMarker(nodeId, -1);
  api.resetExecutionAmount(nodeId);
  api.deleteSystemContext('activeQuestion');
+ api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_3 '${target}'`, "Skipped");
  return;
  }
  }
@@ -2078,6 +2108,8 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  api.resetExecutionAmount(nodeId);
  api.deleteSystemContext('activeQuestion');
  // #endregion 5.1.4 SetMarkersAndContinue
+ // TODO: We need a toggle for this
+ api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_4 ${typeof result === 'object' ? JSON.stringify(result) : result}`, "Found Result");
  }
  return;
  // #endregion 5.1 AnswerIsValid
@@ -2381,11 +2413,14 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  // #region 5.2.3 Reprompt
  // check if there is an extra condition defined for reprompts and check whether it was truthy
  if (sayReprompt && repromptCondition) {
- sayReprompt = !!api.parseCognigyScriptCondition(repromptCondition);
+ const repromptConditionResult = !!api.parseCognigyScriptCondition(repromptCondition);
+ !repromptConditionResult && api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_6`, "Skipping Reprompt Message");
+ sayReprompt = repromptConditionResult;
  }
  // if we decided to skip the reprompt on intent and there
  // is an intent, don't say it
  if (skipRepromptOnIntent && cognigy.input.intent) {
+ api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_5`, "Skipping Reprompt Message");
  sayReprompt = false;
  }
  // We will only output a reprompt if the user is not in the first execution
@@ -2435,12 +2470,22 @@ DO NOT talk about other topics. Do not offer general assistance.`,
  temperature: 0.7,
  timeoutInMs: 5000,
  useCase: "promptNode",
+ stream: repromptLLMStream,
+ streamStopTokens: repromptLLMStreamStopTokens,
+ streamOnDataHandler: (text) => {
+ text = text && text.trim();
+ if (text) {
+ api.output(text, null);
+ }
+ }
  };
  if (repromptLLMProvider && repromptLLMProvider !== "default") {
  data["llmProviderReferenceId"] = repromptLLMProvider;
  }
  const repromptMessage = await api.runGenerativeAIPrompt(data, "gptPromptNode");
- await say_1.SAY.function({ cognigy, childConfigs: [], nodeId, organisationId, config: Object.assign({ handoverOutput, say: { type: "text", text: [repromptMessage] } }, rephraseWithAIParams) });
+ if (!repromptLLMStream) {
+ await say_1.SAY.function({ cognigy, childConfigs: [], nodeId, organisationId, config: Object.assign({ handoverOutput, say: { type: "text", text: [repromptMessage] } }, rephraseWithAIParams) });
+ }
  break;
  default: // this is also "text"
  await say_1.SAY.function({ cognigy, childConfigs: [], nodeId, organisationId, config: Object.assign({ handoverOutput, say: { type: "text", text: [validationMessage] } }, rephraseWithAIParams) });
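
The streaming reprompt added above changes how an LLM reprompt reaches the user: with repromptLLMStream enabled, trimmed chunks are output as they arrive and the final non-streamed SAY is skipped. A small self-contained sketch of that handler shape; `api` here is a console-backed stand-in, not part of the package.

```js
// Sketch of the prompt request shape behind a streaming LLM reprompt
// (mirrors the diff above; `api` is a console-backed stand-in).
const api = { output: (text) => console.log(text) };

const repromptLLMStream = true;
const data = {
    temperature: 0.7,
    timeoutInMs: 5000,
    useCase: "promptNode",
    stream: repromptLLMStream,
    streamStopTokens: [".", "!", "?", "\\n"],  // default repromptLLMStreamStopTokens
    streamOnDataHandler: (text) => {
        text = text && text.trim();
        if (text) {
            api.output(text, null);            // emit each streamed chunk directly
        }
    },
};

// Simulate two streamed chunks; with streaming on, no final SAY is sent.
data.streamOnDataHandler("Could you say that again");
data.streamOnDataHandler("  please?  ");
```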

package/build/shared/charts/descriptors/message/question/utils/validateQuestionAnswer.js +4 -2

@@ -2,12 +2,14 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.validateQuestionAnswer = void 0;
  const validateQuestionAnswer = (cognigy, config) => {
- const { additionalValidation, escalateIntentsAction, escalateIntentsThreshold, escalateIntentsValidIntents, } = config;
+ const { additionalValidation, escalateIntentsAction, escalateIntentsThreshold, escalateIntentsValidIntents } = config;
  const { input, api } = cognigy;
  let isValid = true;
  // check if there is an extra condition defined and check whether it was truthy
  if (additionalValidation) {
- isValid = !!api.parseCognigyScriptCondition(additionalValidation);
+ const additionalValidationResult = !!api.parseCognigyScriptCondition(additionalValidation);
+ !additionalValidationResult && api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_7`, "Invalid Answer");
+ isValid = additionalValidationResult;
  }
  if (escalateIntentsAction && escalateIntentsAction !== "none") {
  // Intent matched, intent score lte escalate threshold and intent is a

package/build/shared/charts/descriptors/nlu/cleanText.js +2 -1

@@ -8,7 +8,7 @@ exports.CLEAN_TEXT = (0, createNodeDescriptor_1.createNodeDescriptor)({
  type: "cleanText",
  defaultLabel: "Clean Text",
  summary: "UI__NODE_EDITOR__NLU__CLEAN_TEXT__DEFAULT_SUMMARY",
- tags: ["nlu", "clean", "cleaning", "parser", "parsing"],
+ tags: ["ai", "nlu", "clean", "cleaning", "parser", "parsing"],
  preview: {
  key: "textToClean",
  type: "text"
@@ -84,6 +84,7 @@ exports.CLEAN_TEXT = (0, createNodeDescriptor_1.createNodeDescriptor)({
  localeToUse = cleanTextLocale;
  }
  const result = new textCleaner_1.TextCleaner(localeToUse, additionalAllowedCharacters, additionalMappedSymbols, additionalSpecialPhrases, additionalPhoneticAlphabet).cleanAll(textToClean, options, (_b = input === null || input === void 0 ? void 0 : input.nlu) === null || _b === void 0 ? void 0 : _b.detailedSlots);
+ result !== textToClean && api.logDebugMessage(`UI__DEBUG_MODE__CLEAN_TEXT__MESSAGE ${textToClean}<br>UI__DEBUG_MODE__CLEAN_TEXT__MESSAGE_2 ${result}`);
  if (storeLocation === "context") {
  api.addToContext(contextKeyToStoreResult, result, "simple");
  }

package/build/shared/charts/descriptors/nlu/executeCognigyNLU.js +1 -1

@@ -127,7 +127,7 @@ exports.EXECUTE_COGNIGY_NLU = (0, createNodeDescriptor_1.createNodeDescriptor)({
  { type: "field", key: "inputKey" },
  { type: "section", key: "advanced" }
  ],
- tags: ["nlu"],
+ tags: ["ai", "nlu"],
  function: async ({ cognigy, config }) => {
  const { api, input } = cognigy;
  const { text, data, mode, contextKey, inputKey, parseIntents, parseSlots, parseSystemSlots, findType, processDefaultReply } = config;

package/build/shared/charts/descriptors/nlu/fuzzySearch.js +24 -2

@@ -14,7 +14,7 @@ exports.FUZZY_SEARCH = (0, createNodeDescriptor_1.createNodeDescriptor)({
  key: "searchPattern",
  type: "text"
  },
- tags: ["nlu", "fuzzy", "search"],
+ tags: ["ai", "nlu", "fuzzy", "search"],
  fields: [
  {
  key: "searchPattern",
@@ -211,7 +211,8 @@ exports.FUZZY_SEARCH = (0, createNodeDescriptor_1.createNodeDescriptor)({
  { type: "section", key: "storageOption" }
  ],
  function: async ({ cognigy, config }) => {
- const { api } = cognigy;
+ var _a, _b, _c, _d, _e, _f, _g;
+ const { api, input } = cognigy;
  const { searchPattern, items, distance, findAllMatches, ignoreLocation, includeMatches, includeScore, isCaseSensitive, location, minMatchCharLength, shouldSort, threshold, storeLocation, inputKey, contextKey } = config;
  if (!searchPattern)
  throw new Error("No search pattern provided.");
@@ -244,11 +245,31 @@ exports.FUZZY_SEARCH = (0, createNodeDescriptor_1.createNodeDescriptor)({
  }
  });
  }
+ if (result && result.length > 0 && ((_a = result === null || result === void 0 ? void 0 : result[0]) === null || _a === void 0 ? void 0 : _a.item) && (input.endpointType === "adminconsole" || api.getMetadata().isFollowSessionActive)) {
+ let debugMessage = `Found ${result.length} match${result.length > 1 ? 'es' : ''}.<br><br>`;
+ if (result.length > 3) {
+ debugMessage += `Top 3:<br>`;
+ }
+ if (result.length >= 1 && ((_b = result === null || result === void 0 ? void 0 : result[0]) === null || _b === void 0 ? void 0 : _b.item) && ((_c = result === null || result === void 0 ? void 0 : result[0]) === null || _c === void 0 ? void 0 : _c.score)) {
+ debugMessage += `1. ${result[0].item} (score: ${result[0].score})<br>`;
+ }
+ if (result.length >= 2 && ((_d = result === null || result === void 0 ? void 0 : result[1]) === null || _d === void 0 ? void 0 : _d.item) && ((_e = result === null || result === void 0 ? void 0 : result[1]) === null || _e === void 0 ? void 0 : _e.score)) {
+ debugMessage += `2. ${result[1].item} (score: ${result[1].score})<br>`;
+ }
+ if (result.length >= 3 && ((_f = result === null || result === void 0 ? void 0 : result[2]) === null || _f === void 0 ? void 0 : _f.item) && ((_g = result === null || result === void 0 ? void 0 : result[2]) === null || _g === void 0 ? void 0 : _g.score)) {
+ debugMessage += `3. ${result[2].item} (score: ${result[2].score})`;
+ }
+ api.logDebugMessage(debugMessage, "Result Found");
+ }
+ else {
+ api.logDebugMessage("UI__DEBUG_MODE__FUZZY_SEARCH__NO_RESULTS", "No Results");
+ }
  }
  else {
  result = {
  "error": `Source data file size limit (${getFuzzySearchMaxObjectSizeInBytes()} bytes) exceeded`
  };
+ api.logDebugError(`Source data file size limit (${getFuzzySearchMaxObjectSizeInBytes()} bytes) exceeded`, "Size Limit Exceeded");
  }
  if (storeLocation === "context") {
  api.addToContext(contextKey, result, "simple");
@@ -264,6 +285,7 @@ exports.FUZZY_SEARCH = (0, createNodeDescriptor_1.createNodeDescriptor)({
  else {
  api.addToInput(inputKey, error);
  }
+ api.logDebugError(error.message, "Error");
  }
  }
  });
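
The block above builds a debug summary of up to three fuzzy-search matches with three hand-rolled if blocks. A compact restatement of that logic (not the package's code) for readers who want the intent without the compiled optional-chaining helpers:

```js
// Restatement of the "Result Found" debug summary built in the diff above.
function buildFuzzySearchDebugMessage(result) {
    let message = `Found ${result.length} match${result.length > 1 ? "es" : ""}.<br><br>`;
    if (result.length > 3) {
        message += "Top 3:<br>";
    }
    result.slice(0, 3).forEach((entry, index) => {
        if (entry && entry.item && entry.score) {
            message += `${index + 1}. ${entry.item} (score: ${entry.score})<br>`;
        }
    });
    return message;
}

console.log(buildFuzzySearchDebugMessage([
    { item: "red", score: 0.01 },
    { item: "rad", score: 0.2 },
]));
```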

package/build/shared/charts/descriptors/nlu/generativeSlotFiller/generativeSlotFiller.js +1 -1

@@ -129,7 +129,7 @@ exports.GENERATIVE_SLOT_FILLER = (0, createNodeDescriptor_1.createNodeDescriptor
  { type: "section", key: "storage" },
  ],
  appearance: {},
- tags: ["nlu"],
+ tags: ["ai", "nlu"],
  function: async ({ cognigy, config, childConfigs, nodeId, organisationId }) => {
  var _a;
  const { api, lastConversationEntries } = cognigy;

package/build/shared/charts/descriptors/nlu/generativeSlotFiller/generativeSlotFillerFallback.js +1 -1

@@ -22,6 +22,6 @@ exports.GENERATIVE_SLOT_FILLER_FALLBACK = (0, createNodeDescriptor_1.createNodeD
  movable: false,
  placement: {},
  },
- tags: ["nlu"],
+ tags: ["ai", "nlu"],
  });
  //# sourceMappingURL=generativeSlotFillerFallback.js.map

package/build/shared/charts/descriptors/nlu/generativeSlotFiller/generativeSlotFillerSuccess.js +1 -1

@@ -22,6 +22,6 @@ exports.GENERATIVE_SLOT_FILLER_SUCCESS = (0, createNodeDescriptor_1.createNodeDe
  movable: false,
  placement: {},
  },
- tags: ["nlu"],
+ tags: ["ai", "nlu"],
  });
  //# sourceMappingURL=generativeSlotFillerSuccess.js.map

package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +18 -9

@@ -135,7 +135,14 @@ exports.createLastUserInputString = createLastUserInputString;
  * @param cognigy the cognigy object (input, api, etc)
  */
  const writeLLMDebugLogs = async (label, prompt, response, debugLogTokenCount, debugLogRequestAndCompletion, cognigy) => {
- const { api } = cognigy;
+ var _a, _b, _c, _d;
+ const { api, input } = cognigy;
+ if (input.endpointType !== "adminconsole" && !api.getMetadata().isFollowSessionActive) {
+ // only return logs if in interaction panel or following session
+ return;
+ }
+ // stringify the response if it is an object
+ const responseOutputFormatted = typeof response === "object" ? JSON.stringify(response.result || response, null, 4) : response;
  // debug logs are only processed for the interaction panel
  if (debugLogRequestAndCompletion) {
  try {
@@ -143,16 +150,15 @@ const writeLLMDebugLogs = async (label, prompt, response, debugLogTokenCount, de
  let completionTokenMessage = "";
  if (debugLogTokenCount) {
  if (prompt) {
- const requestTokens = await api.countGPTTokens(prompt);
+ const requestTokens = (_a = response === null || response === void 0 ? void 0 : response.tokenUsage) === null || _a === void 0 ? void 0 : _a.inputTokens;
  requestTokenMessage = ` (${requestTokens} Tokens)`;
  }
  if (response) {
- const message = response.result || response;
- const completionTokens = await api.countGPTTokens(message);
+ const completionTokens = (_b = response === null || response === void 0 ? void 0 : response.tokenUsage) === null || _b === void 0 ? void 0 : _b.outputTokens;
  completionTokenMessage = ` (${completionTokens} Tokens)`;
  }
  }
- api.log('debug', `${label} - Request${requestTokenMessage}: '${prompt}' - Completion${completionTokenMessage}: '${response}'`);
+ api.logDebugMessage(`UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__REQUEST${requestTokenMessage}:<br>${prompt}<br><br>UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__COMPLETION${completionTokenMessage}:<br>${responseOutputFormatted}`, "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER");
  }
  catch (err) { }
  }
@@ -160,10 +166,13 @@ const writeLLMDebugLogs = async (label, prompt, response, debugLogTokenCount, de
  try {
  let requestTokens = 0;
  let completionTokens = 0;
- requestTokens = prompt && await api.countGPTTokens(prompt);
- completionTokens = response && await api.countGPTTokens(response);
- const completionMessage = response ? ` - Completion Tokens: ${completionTokens} - Total Tokens: ${requestTokens + completionTokens}` : "";
- api.log('debug', `${label} - Request Tokens: ${requestTokens} ${completionMessage}`);
+ requestTokens = (_c = response.tokenUsage) === null || _c === void 0 ? void 0 : _c.inputTokens;
+ completionTokens = (_d = response.tokenUsage) === null || _d === void 0 ? void 0 : _d.outputTokens;
+ const requestTokenMessage = requestTokens || "unknown";
+ const completionTokenMessage = completionTokens || "unknown";
+ const totalTokens = (requestTokens + completionTokens) || "unknown";
+ const completionMessage = response ? `<br>UI__DEBUG_MODE__LLM_PROMPT__TOKEN_COUNT__COMPLETION_TOKENS: ${completionTokenMessage}<br>UI__DEBUG_MODE__LLM_PROMPT__TOKEN_COUNT__TOTAL_TOKENS: ${totalTokens}` : "";
+ api.logDebugMessage(`UI__DEBUG_MODE__LLM_PROMPT__TOKEN_COUNT__REQUEST_TOKENS: ${requestTokenMessage}${completionMessage}`, "UI__DEBUG_MODE__LLM_PROMPT__TOKEN_COUNT__HEADER");
  }
  catch (err) { }
  }
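
The token logging above no longer re-counts tokens with countGPTTokens; it reads the counts the LLM response already carries in tokenUsage and falls back to "unknown". A minimal sketch of that accounting with a made-up response object:

```js
// Sketch of the new token accounting: counts come from response.tokenUsage
// (as in the diff above); this response object is made up for illustration.
const response = {
    result: "Hello!",
    tokenUsage: { inputTokens: 42, outputTokens: 7 },
};

const requestTokens = response && response.tokenUsage && response.tokenUsage.inputTokens;
const completionTokens = response && response.tokenUsage && response.tokenUsage.outputTokens;

console.log(`Request Tokens: ${requestTokens || "unknown"}`);
console.log(`Completion Tokens: ${completionTokens || "unknown"}`);
console.log(`Total Tokens: ${(requestTokens + completionTokens) || "unknown"}`);
```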

package/build/shared/charts/descriptors/nlu/matchPattern.js +1 -1

@@ -11,7 +11,7 @@ exports.MATCH_PATTERN = (0, createNodeDescriptor_1.createNodeDescriptor)({
  type: "text"
  },
  summary: "UI__NODE_EDITOR__NLU__MATCH_PATTERN__SUMMARY",
- tags: ["nlu", "pattern", "patterns"],
+ tags: ["ai", "nlu", "pattern", "patterns"],
  fields: [
  {
  key: "patterns",

package/build/shared/charts/descriptors/nlu/regexSlotFiller.js +1 -1

@@ -36,7 +36,7 @@ exports.REGEX_SLOT_FILLER = (0, createNodeDescriptor_1.createNodeDescriptor)({
  label: "UI__NODE_EDITOR__NLU__REGEX_SLOT_FILLER__FIELDS__SLOT__LABEL"
  },
  ],
- tags: ["nlu", "regular", "expression", "slot"],
+ tags: ["ai", "nlu", "regular", "expression", "slot"],
  function: async (regexSlotFillerParams) => {
  const { cognigy, config } = regexSlotFillerParams;
  const { api } = cognigy;

package/build/shared/charts/descriptors/service/GPTConversation.js +1 -1

@@ -379,7 +379,7 @@ exports.GPT_CONVERSATION = (0, createNodeDescriptor_1.createNodeDescriptor)({
  { type: "section", key: "advanced" },
  { type: "section", key: "timeouts" }
  ],
- tags: ["service"],
+ tags: ["ai"],
  function: async ({ cognigy, config }) => {
  const { api, context, input } = cognigy;
  const { debug, timeoutMessage } = config;