@cognigy/rest-api-client 2025.16.0 → 2025.17.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. package/CHANGELOG.md +5 -0
  2. package/build/shared/charts/descriptors/data/debugMessage.js +13 -3
  3. package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +48 -49
  4. package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +31 -2
  5. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +11 -2
  6. package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +12 -3
  7. package/build/shared/charts/descriptors/transcripts/getTranscript.js +23 -3
  8. package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +3 -0
  9. package/build/shared/generativeAI/getPrompt.js +75 -0
  10. package/build/shared/generativeAI/utils/generativeAIPrompts.js +613 -0
  11. package/build/shared/generativeAI/utils/prompts/contextAwareUserQueryRephrasing.js +84 -0
  12. package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +1 -0
  13. package/build/shared/interfaces/messageAPI/handover.js +6 -0
  14. package/build/shared/interfaces/resources/ILargeLanguageModel.js +1 -0
  15. package/build/test.js +39 -0
  16. package/dist/esm/shared/charts/descriptors/data/debugMessage.js +13 -3
  17. package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +48 -50
  18. package/dist/esm/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +29 -1
  19. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +11 -2
  20. package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +12 -3
  21. package/dist/esm/shared/charts/descriptors/transcripts/getTranscript.js +23 -3
  22. package/dist/esm/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +3 -0
  23. package/dist/esm/shared/generativeAI/getPrompt.js +68 -0
  24. package/dist/esm/shared/generativeAI/utils/generativeAIPrompts.js +610 -0
  25. package/dist/esm/shared/generativeAI/utils/prompts/contextAwareUserQueryRephrasing.js +81 -0
  26. package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +1 -0
  27. package/dist/esm/shared/interfaces/messageAPI/handover.js +6 -0
  28. package/dist/esm/shared/interfaces/resources/ILargeLanguageModel.js +1 -0
  29. package/dist/esm/shared/interfaces/restAPI/management/authentication/ICreateJWTToken.js +1 -0
  30. package/dist/esm/test.js +39 -0
  31. package/package.json +1 -1
  32. package/types/index.d.ts +42 -19
package/CHANGELOG.md CHANGED
@@ -1,3 +1,8 @@
+ # 2025.17.0
+ Released: August 21th, 2025
+
+ Released state of package up to date with Cognigy.AI v2025.17.0
+
  # 2025.16.0
  Released: August 05th, 2025
 
@@ -60,12 +60,22 @@ exports.DEBUG_MESSAGE = (0, createNodeDescriptor_1.createNodeDescriptor)({
      function: async ({ cognigy, config }) => {
          const { api } = cognigy;
          const { level, message, header } = config;
-         if (level && message) {
+         let messageToOutput = message;
+         //Atp message can be of type any since cognigyScript can return any type
+         // whereas logDebugMessage expects a string or object
+         // so we need to change the type of message to string if not string or object
+         if (message === undefined || message === null) {
+             return;
+         }
+         else if (typeof message !== "string" && typeof message !== "object") {
+             messageToOutput = JSON.stringify(message);
+         }
+         if (level) {
              if (level === "info") {
-                 api.logDebugMessage(message, header);
+                 api.logDebugMessage(messageToOutput, header);
              }
              if (level === "error") {
-                 api.logDebugError(message, header);
+                 api.logDebugError(messageToOutput, header);
              }
          }
      }
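
Editor's note: the Debug Message hunk above normalizes the message before it is logged. A minimal TypeScript sketch of that behavior, assuming only what the hunk shows; the logDebug callback is a hypothetical stand-in for api.logDebugMessage / api.logDebugError.

    type DebugLogger = (message: string | object, header?: string) => void;

    // Mirrors the new guard: skip null/undefined, pass strings and objects through,
    // and stringify anything else (numbers, booleans) that CognigyScript may return.
    function normalizeAndLog(message: unknown, header: string, logDebug: DebugLogger): void {
        if (message === undefined || message === null) {
            return;
        }
        const messageToOutput: string | object =
            typeof message === "string" || typeof message === "object"
                ? (message as string | object)
                : JSON.stringify(message);
        logDebug(messageToOutput, header);
    }

    // Example: a numeric result is logged as the string "42" instead of failing the type contract.
    normalizeAndLog(42, "Debug", (msg, hdr) => console.log(hdr, msg));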
@@ -1,11 +1,12 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.SEARCH_EXTRACT_OUTPUT = void 0;
+ exports.getContextAwareUserQueryRephrasingPromptParser = exports.SEARCH_EXTRACT_OUTPUT = void 0;
  /* Custom modules */
  const createNodeDescriptor_1 = require("../../createNodeDescriptor");
  const logic_1 = require("../logic");
  const prompt_1 = require("../nlu/generativeSlotFiller/prompt");
  const crypto_1 = require("crypto");
+ const getPrompt_1 = require("../../../generativeAI/getPrompt");
  const errors_1 = require("../../../errors");
  /**
   * Returns the simplified english name for a language given a language code
@@ -549,7 +550,7 @@ exports.SEARCH_EXTRACT_OUTPUT = (0, createNodeDescriptor_1.createNodeDescriptor)
      },
      tags: ["ai", "knowledgeSearch", "knowledge", "search"],
      function: async (knowledgeSearchParams) => {
-         var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l;
+         var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k;
          const { cognigy, config, nodeId } = knowledgeSearchParams;
          const { input, api } = cognigy;
          const { topK, searchString, searchStoreLocation, searchStoreLocationContextKey, searchStoreLocationInputKey, searchSourceTags, searchSourceTagsFilterOp, temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, timeoutMessage, outputFallback, outputMode, mode, errorHandling, errorHandlingGotoTarget, streamStopTokens, followUpDetection, debugLogTokenCount, debugLogRequestAndCompletion } = config;
@@ -576,53 +577,24 @@ exports.SEARCH_EXTRACT_OUTPUT = (0, createNodeDescriptor_1.createNodeDescriptor)
          // check if follow up detection is active and if yes, handle accordingly
          // this is "context aware search"
          if (followUpDetection === "transcript") {
-             let prompt;
-             let lastRoundTrip;
-             // this is a fallback in case the node was created before this function was added and followUpDetectionSteps is undefined
-             followUpDetectionSteps = followUpDetectionSteps || 2;
              // check whether we're in an flow execution that's not the first
              // as it doesn't make sense to check for follow ups in the first execution
              if (input.execution > 1) {
+                 // this is a fallback in case the node was created before this function was added and followUpDetectionSteps is undefined
+                 followUpDetectionSteps = followUpDetectionSteps || 2;
                  // always remember the last thing the user said (needed later)
-                 lastRoundTrip = (_b = cognigy
-                     .lastConversationEntries) === null || _b === void 0 ? void 0 : _b.slice(1, followUpDetectionSteps + 1).reverse().map(entry => "- " + (entry.source === "user" ? "USER: " : "BOT: ") + entry.text).join("\n");
-                 // if follow up detection is set to 2 or more, we use the conversation transcript
-                 // as reference. Start at the second entry, because the first one is the current
-                 const recentConversation = (_c = cognigy
-                     .lastConversationEntries) === null || _c === void 0 ? void 0 : _c.slice(1, followUpDetectionSteps + 1).reverse().map(entry => "- " + (entry.source === "user" ? "USER: " : "BOT: ") + entry.text).join("\n");
-                 prompt = `Below is the transcript of a conversation:
- ${recentConversation}
- USER: ${searchString}
- Does the last USER input refer to the conversation before?
- Answer with "true" or "false". Answer:`;
-                 let promptResponse;
-                 // set the detailed results to true to get the token usage
-                 const returnDetailedResults = true;
+                 const chatHistory = (_b = cognigy
+                     .lastConversationEntries) === null || _b === void 0 ? void 0 : _b.slice(0, followUpDetectionSteps + 1).reverse();
                  try {
-                     const firstFollowUpResponse = await api.runGenerativeAIPrompt({ prompt, detailedResults: returnDetailedResults }, "answerExtraction");
-                     promptResponse = firstFollowUpResponse.result;
+                     const promptData = {
+                         // set the detailed results to true to get the token usage
+                         detailedResults: true
+                     };
+                     const rephrasedUserQueryResponse = await api.runGenerativeAIPromptForUseCase(promptData, "answerExtraction", "contextAwareUserQueryRephrasing", getContextAwareUserQueryRephrasingPromptParser(chatHistory));
+                     const promptResponse = rephrasedUserQueryResponse.result;
                      // if we're in adminconsole, process debugging options
-                     (0, prompt_1.writeLLMDebugLogs)("Search Extract Output Follow Up Detection", prompt, firstFollowUpResponse, debugLogTokenCount, false, cognigy);
-                     // check if LLM thinks the input was a follow up
-                     if (promptResponse === null || promptResponse === void 0 ? void 0 : promptResponse.toLowerCase().includes("true")) {
-                         prompt = `You are tasked to rewrite a question based on a context, so that the question is clearer.
-
- Example:
- Context:
- USER: Where is Germany?
- BOT: Germany is in Europe.
- Question: Is that a continent?
- New: Is Europe a continent?
-
- Task:
- Context:
- ${lastRoundTrip}
- Question: ${searchString}
- New: `;
-                         const secondFollowUpResponse = await api.runGenerativeAIPrompt({ prompt, detailedResults: returnDetailedResults }, "answerExtraction");
-                         promptResponse = secondFollowUpResponse.result;
-                         // if we're in adminconsole, process debugging options
-                         (0, prompt_1.writeLLMDebugLogs)("Search Extract Output Follow Up Detection 2", prompt, secondFollowUpResponse, debugLogTokenCount, false, cognigy);
+                     (0, prompt_1.writeLLMDebugLogs)("Search Extract Output Follow Up Detection", prompt, rephrasedUserQueryResponse, debugLogTokenCount, false, cognigy);
+                     if ((promptResponse === null || promptResponse === void 0 ? void 0 : promptResponse.toLowerCase()) !== "false") {
                          // the actual search string to now use is the rewritten question
                          actualSearchString = promptResponse;
                          api.logDebugMessage(`UI__DEBUG_MODE__SEO__MESSAGE '${actualSearchString}'`);
@@ -745,7 +717,7 @@ New: `;
              // Perform knowledge search
              try {
                  // Set understood to true so that the interaction doesn't look false in our analytics
-                 (_d = api.setAnalyticsData) === null || _d === void 0 ? void 0 : _d.call(api, "understood", "true");
+                 (_c = api.setAnalyticsData) === null || _c === void 0 ? void 0 : _c.call(api, "understood", "true");
                  input.understood = true;
                  const knowledgeSearchResponse = await api.knowledgeSearch(data);
                  (0, prompt_1.writeLLMDebugLogs)("Search Extract Output Embeddings Call", data.query, undefined, debugLogTokenCount, false, cognigy);
@@ -778,7 +750,7 @@ New: `;
              }
              // #endregion 1 Perform Search
              // #region 2 Perform Answer Extraction
-             let documents = (_e = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _e === void 0 ? void 0 : _e.map(result => result === null || result === void 0 ? void 0 : result.text).join(' ');
+             let documents = (_d = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _d === void 0 ? void 0 : _d.map(result => result === null || result === void 0 ? void 0 : result.text).join(' ');
              const replacedUserInput = input.text + (actualSearchString !== input.text ? ` possibly meaning "${actualSearchString}"` : "");
              prompt = prompt.replace(/@userInput/g, replacedUserInput);
              prompt = prompt.replace(/@foundDocuments/g, documents);
@@ -983,7 +955,7 @@ New: `;
                  {
                      "separator": true,
                      "type": "TextBlock",
-                     "text": (_f = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _f === void 0 ? void 0 : _f[0].text,
+                     "text": (_e = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _e === void 0 ? void 0 : _e[0].text,
                      "wrap": true,
                      "spacing": "Padding"
                  }
@@ -1004,7 +976,7 @@ New: `;
                  "version": "1.6"
              };
              // @ts-ignore
-             if ((_j = (_h = (_g = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _g === void 0 ? void 0 : _g[0]) === null || _h === void 0 ? void 0 : _h.chunkMetaData) === null || _j === void 0 ? void 0 : _j.url) {
+             if ((_h = (_g = (_f = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _f === void 0 ? void 0 : _f[0]) === null || _g === void 0 ? void 0 : _g.chunkMetaData) === null || _h === void 0 ? void 0 : _h.url) {
                  ADAPTIVE_CARD_RESULT.body[2].items[0].columns[1].items.push({
                      "type": "ActionSet",
                      "actions": [
@@ -1012,7 +984,7 @@ New: `;
                          "type": "Action.OpenUrl",
                          "title": "Open Source",
                          // @ts-ignore
-                         "url": (_k = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _k === void 0 ? void 0 : _k[0].chunkMetaData.url
+                         "url": (_j = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _j === void 0 ? void 0 : _j[0].chunkMetaData.url
                      }
                  ],
                  "separator": true
@@ -1042,7 +1014,7 @@ New: `;
                  await api.output(promptResponse, null);
              }
              else if (mainPromptResponse.finishReason) {
-                 (_l = api.output) === null || _l === void 0 ? void 0 : _l.call(api, "", {
+                 (_k = api.output) === null || _k === void 0 ? void 0 : _k.call(api, "", {
                      _cognigy: {
                          _preventTranscript: true,
                          _messageId,
@@ -1067,4 +1039,31 @@ New: `;
          }
      }
  });
+ /**
+  * Parses the prompt for the context-aware user query rephrasing.
+  * It replaces the "@@chatHistory" variable with the chat history messages.
+  * It replaces the "@@userQuery" variable with the last user message.
+  *
+  * @param chatHistory - The chat history to be used for context.
+  * @return A function that takes a raw prompt and returns the modified prompt.
+  */
+ function getContextAwareUserQueryRephrasingPromptParser(chatHistory) {
+     return (rawPrompt) => {
+         if ((0, getPrompt_1.isOpenAIChatPrompt)(rawPrompt)) {
+             const modifiedPrompt = [...rawPrompt];
+             for (const message of chatHistory) {
+                 const role = message.source === "user" ? "user" : "assistant";
+                 modifiedPrompt.push({
+                     role,
+                     content: message.text
+                 });
+             }
+             return modifiedPrompt;
+         }
+         else {
+             throw new errors_1.InternalServerError(`Invalid prompt type for context-aware user query rephrasing. Expected a chat prompt.`);
+         }
+     };
+ }
+ exports.getContextAwareUserQueryRephrasingPromptParser = getContextAwareUserQueryRephrasingPromptParser;
  //# sourceMappingURL=searchExtractOutput.js.map
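
Editor's note: the new export above appends the recent chat history to an OpenAI-style chat prompt before the rephrasing call. A TypeScript sketch of the resulting message array, with a hypothetical base prompt and transcript; only the role mapping and the push order come from the code above.

    type OpenAIChatMessage = { role: "system" | "user" | "assistant"; content: string };
    type ConversationEntry = { source: "user" | "bot"; text: string };

    // Re-implements the parser's core loop: copy the raw prompt, then append each
    // transcript entry as a chat message ("user" for the user, "assistant" otherwise).
    function appendChatHistory(rawPrompt: OpenAIChatMessage[], chatHistory: ConversationEntry[]): OpenAIChatMessage[] {
        const modifiedPrompt = [...rawPrompt];
        for (const message of chatHistory) {
            modifiedPrompt.push({
                role: message.source === "user" ? "user" : "assistant",
                content: message.text,
            });
        }
        return modifiedPrompt;
    }

    // Example with a made-up system instruction and a short transcript:
    const rephrasingPrompt = appendChatHistory(
        [{ role: "system", content: "Rephrase the user's question using the conversation context." }],
        [
            { source: "user", text: "Where is Germany?" },
            { source: "bot", text: "Germany is in Europe." },
            { source: "user", text: "Is that a continent?" },
        ],
    );
    console.log(rephrasingPrompt.length); // 4 messages: system instruction plus three history turns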
@@ -1,6 +1,6 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.convertChatToPrompt = exports.writeLLMDebugLogs = exports.createLastUserInputString = exports.createLastConversationChatObject = exports.createLastConverationString = exports.createInvalidAnswerPrompt = exports.createQuestionPrompt = exports.createExtractionPrompt = void 0;
+ exports.promptToString = exports.convertChatToPrompt = exports.writeLLMDebugLogs = exports.createLastUserInputString = exports.createLastConversationChatObject = exports.createLastConverationString = exports.createInvalidAnswerPrompt = exports.createQuestionPrompt = exports.createExtractionPrompt = void 0;
  const createExtractionPrompt = (slots, lastConversationEntries) => {
      const userInput = lastConversationEntries.filter(entry => entry.source === "user").map(entry => "- " + entry.text).join("\n");
      const conversation = (0, exports.createLastConverationString)(lastConversationEntries);
@@ -159,6 +159,10 @@ const writeLLMDebugLogs = async (label, prompt, response, debugLogTokenCount, de
              completionTokenMessage = ` (${completionTokens} Tokens)`;
          }
      }
+     let promptString = prompt;
+     if (typeof prompt != "string") {
+         promptString = promptToString(prompt);
+     }
      let inputLabelKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__REQUEST";
      let headerKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER";
      if (nodeType === "llmPromptV2") {
@@ -166,7 +170,7 @@ const writeLLMDebugLogs = async (label, prompt, response, debugLogTokenCount, de
          headerKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER_WITH_SYSTEM_PROMPT";
      }
      ;
-     api.logDebugMessage(`${inputLabelKey}${requestTokenMessage}:<br>${prompt}<br><br>UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__COMPLETION${completionTokenMessage}:<br>${responseOutputFormatted}`, headerKey);
+     api.logDebugMessage(`${inputLabelKey}${requestTokenMessage}:<br>${promptString}<br><br>UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__COMPLETION${completionTokenMessage}:<br>${responseOutputFormatted}`, headerKey);
      }
      catch (err) { }
  }
@@ -208,4 +212,29 @@ const convertChatToPrompt = (chat) => {
      return prompt;
  };
  exports.convertChatToPrompt = convertChatToPrompt;
+ /**
+  * Converts a TALLPrompts object into a string representation.
+  * @param prompt The prompt to convert to a string
+  * @returns The string representation of the prompt
+  */
+ function promptToString(prompt) {
+     if ("prompt" in prompt) {
+         // TCompletionPrompt
+         return prompt.prompt;
+     }
+     else if ("messages" in prompt) {
+         // TChatPrompt
+         return prompt.messages
+             .map((msg) => `[${msg.role}] ${msg.content}`)
+             .join("\n");
+     }
+     else if (Array.isArray(prompt)) {
+         // OpenAIChatMessage[]
+         return prompt
+             .map((msg) => `[${msg.role}] ${msg.content}`)
+             .join("\n");
+     }
+     return "";
+ }
+ exports.promptToString = promptToString;
  //# sourceMappingURL=prompt.js.map
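
Editor's note: the new promptToString helper above flattens any of the three supported prompt shapes into a single string for the debug log. A small TypeScript sketch with local type aliases standing in for the package's TCompletionPrompt / TChatPrompt / OpenAI message types.

    type ChatMessage = { role: string; content: string };
    type CompletionPrompt = { prompt: string };
    type ChatPrompt = { messages: ChatMessage[] };

    // Same branching as the compiled helper: completion prompts return their text,
    // chat prompts and raw message arrays are rendered as "[role] content" lines.
    function promptToString(prompt: CompletionPrompt | ChatPrompt | ChatMessage[]): string {
        if ("prompt" in prompt) {
            return prompt.prompt;
        } else if ("messages" in prompt) {
            return prompt.messages.map((msg) => `[${msg.role}] ${msg.content}`).join("\n");
        } else if (Array.isArray(prompt)) {
            return prompt.map((msg) => `[${msg.role}] ${msg.content}`).join("\n");
        }
        return "";
    }

    // Example: an OpenAI-style message array becomes two "[role] content" lines.
    console.log(promptToString([
        { role: "user", content: "Hello" },
        { role: "assistant", content: "Hi there!" },
    ]));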
@@ -421,6 +421,13 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
                  step: 0.1
              }
          },
+         {
+             key: "useTextAlternativeForLLM",
+             label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__INCLUDE_ALL_OUTPUT_TYPES__LABEL",
+             type: "toggle",
+             description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__INCLUDE_ALL_OUTPUT_TYPES__DESCRIPTION",
+             defaultValue: true,
+         },
          {
              key: "logErrorToSystem",
              label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_TO_SYSTEM__LABEL",
@@ -816,6 +823,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
                  "timeoutInMs",
                  "maxTokens",
                  "temperature",
+                 "useTextAlternativeForLLM",
              ],
          },
          {
@@ -863,7 +871,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
      function: async ({ cognigy, config, childConfigs, nodeId }) => {
          var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23;
          const { api, context, input, profile, flowReferenceId } = cognigy;
-         const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, debugLogLLMLatency, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
+         const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, useTextAlternativeForLLM, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, debugLogLLMLatency, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
          try {
              if (!aiAgent) {
                  throw new Error("Could not resolve AI Agent reference in AI Agent Node");
@@ -1141,7 +1149,8 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
              const transcript = await api.getTranscript({
                  limit: 50,
                  rolesWhiteList: [transcripts_1.TranscriptRole.USER, transcripts_1.TranscriptRole.ASSISTANT, transcripts_1.TranscriptRole.TOOL],
-                 excludeDataOnlyMessagesFilter: [transcripts_1.TranscriptRole.ASSISTANT]
+                 excludeDataOnlyMessagesFilter: [transcripts_1.TranscriptRole.ASSISTANT],
+                 useTextAlternativeForLLM,
              });
              // For knowledgeSearch "always", we enhance the user input with the knowledge search response data
              if (knowledgeSearchBehavior === "always" &&
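
Editor's note: the AI Agent Job hunk above (and the LLM Prompt v2 hunk below) forwards a new useTextAlternativeForLLM toggle, defaulting to true, into the transcript request. A TypeScript sketch of the option shape; the getTranscript stub and the plain role strings are hypothetical stand-ins for cognigy.api.getTranscript and the TranscriptRole constants.

    interface GetTranscriptOptions {
        limit: number;
        rolesWhiteList: string[];
        excludeDataOnlyMessagesFilter: string[];
        useTextAlternativeForLLM?: boolean; // new in 2025.17.0
    }

    // Hypothetical stand-in for cognigy.api.getTranscript; it only echoes the options here.
    async function getTranscript(options: GetTranscriptOptions): Promise<GetTranscriptOptions> {
        return options;
    }

    // The node passes its toggle through unchanged, alongside the existing role filters.
    async function buildAgentTranscript(useTextAlternativeForLLM: boolean): Promise<GetTranscriptOptions> {
        return getTranscript({
            limit: 50,
            rolesWhiteList: ["user", "assistant", "tool"], // stand-ins for TranscriptRole.USER/ASSISTANT/TOOL
            excludeDataOnlyMessagesFilter: ["assistant"],  // stand-in for TranscriptRole.ASSISTANT
            useTextAlternativeForLLM,
        });
    }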
@@ -401,6 +401,13 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
                  ]
              }
          },
+         {
+             key: "useTextAlternativeForLLM",
+             label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__INCLUDE_ALL_OUTPUT_TYPES__LABEL",
+             type: "toggle",
+             description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__INCLUDE_ALL_OUTPUT_TYPES__DESCRIPTION",
+             defaultValue: true,
+         },
          {
              key: "customModelOptions",
              label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_MODEL_OPTIONS__LABEL",
@@ -552,7 +559,8 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
                  "frequencyPenalty",
                  "useStop",
                  "stop",
-                 "seed"
+                 "seed",
+                 "useTextAlternativeForLLM",
              ]
          },
          {
@@ -641,7 +649,7 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
          var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
          const { api, input, flowReferenceId } = cognigy;
          const { temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, streamStopTokens, streamStopTokenOverrides, debugLogTokenCount, debugLogRequestAndCompletion, debugLogLLMLatency, debugLogToolDefinitions, llmProviderReferenceId, usePromptMode, chatTranscriptSteps, responseFormat, streamStoreCopyInInput, seed, immediateOutput, customModelOptions, customRequestOptions, errorHandling = "continue", // default behavior for LLM Prompt node was, continue its execution even though an error occurred (deviating it from the SEO node) & do not output an error message on UI explicitly. However, error is always stored in the input or context object. We can use an extra "say" node to output it.
-         errorHandlingGotoTarget, errorMessage, logErrorToSystem, processImages, transcriptImageHandling, toolChoice, useStrict } = config;
+         errorHandlingGotoTarget, errorMessage, useTextAlternativeForLLM, logErrorToSystem, processImages, transcriptImageHandling, toolChoice, useStrict } = config;
          let prompt = config.prompt || "";
          const { traceId } = input;
          // check if custom variables are used and if they have a length modifier
@@ -786,7 +794,8 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
              const transcript = await api.getTranscript({
                  limit: chatTranscriptSteps || 50,
                  rolesWhiteList: [transcripts_1.TranscriptRole.USER, transcripts_1.TranscriptRole.ASSISTANT, transcripts_1.TranscriptRole.TOOL],
-                 excludeDataOnlyMessagesFilter: [transcripts_1.TranscriptRole.ASSISTANT]
+                 excludeDataOnlyMessagesFilter: [transcripts_1.TranscriptRole.ASSISTANT],
+                 useTextAlternativeForLLM,
              });
              llmPromptOptions["transcript"] = transcript;
              llmPromptOptions["chat"] = [{
@@ -70,6 +70,13 @@ exports.GET_TRANSCRIPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
                  value: "context",
              }
          },
+         {
+             key: "includeTextAlternativeInTranscript",
+             type: "toggle",
+             label: "UI__NODE_EDITOR__GET_TRANSCRIPT__FIELDS__INCLUDE_TEXT_ALTERNATIVE_IN_TRANSCRIPT__LABEL",
+             description: "UI__NODE_EDITOR__GET_TRANSCRIPT__FIELDS__INCLUDE_TEXT_ALTERNATIVE_IN_TRANSCRIPT__DESCRIPTION",
+             defaultValue: true,
+         },
      ],
      sections: [
          {
@@ -81,18 +88,31 @@ exports.GET_TRANSCRIPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
                  "inputKey",
                  "contextKey",
              ]
-         }
+         },
+         {
+             key: "advanced",
+             label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__ADVANCED__LABEL",
+             defaultCollapsed: true,
+             fields: [
+                 "includeTextAlternativeInTranscript",
+             ],
+         },
      ],
      form: [
          { type: "field", key: "limit" },
          { type: "section", key: "storage" },
+         { type: "section", key: "advanced" },
      ],
      tags: ["service", "transcripts"],
      function: async ({ cognigy, config }) => {
          var _a;
-         const { limit, storeLocation, inputKey, contextKey } = config;
+         const { limit, storeLocation, inputKey, contextKey, includeTextAlternativeInTranscript } = config;
          const { api } = cognigy;
-         const transcript = await api.getTranscript({ limit, excludeDataOnlyMessagesFilter: [transcripts_1.TranscriptRole.AGENT, transcripts_1.TranscriptRole.ASSISTANT] });
+         const transcript = await api.getTranscript({
+             limit,
+             excludeDataOnlyMessagesFilter: [transcripts_1.TranscriptRole.AGENT],
+             includeTextAlternativeInTranscript,
+         });
          if (storeLocation === "context") {
              (_a = api.addToContext) === null || _a === void 0 ? void 0 : _a.call(api, contextKey, transcript, "simple");
          }
@@ -146,6 +146,9 @@ class SessionConfigMapper extends base_mapper_1.BaseMapper {
              const timeout = Number(spAsrTimeout || asrTimeout);
              recognizer.asrTimeout = timeout / 1000 || undefined;
          }
+         else if (asrEnabled === false || spAsrEnabled === false) {
+             recognizer.asrTimeout = 0;
+         }
          return recognizer;
      }
      isDtmfEnabled(sessionParams, dtmf) {
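
Editor's note: the mapper change above adds an explicit fallback: when ASR is disabled and no timeout is configured, asrTimeout is forced to 0. A simplified TypeScript sketch of the branch; the surrounding guard and field names are assumptions, since the hunk only shows the body of the existing branch and the new else-if.

    interface RecognizerConfig {
        asrTimeout?: number; // seconds
    }

    function mapAsrTimeout(
        asrEnabled: boolean | undefined,
        spAsrEnabled: boolean | undefined,
        asrTimeoutMs?: number,
        spAsrTimeoutMs?: number,
    ): RecognizerConfig {
        const recognizer: RecognizerConfig = {};
        if (spAsrTimeoutMs || asrTimeoutMs) {
            // existing behavior: convert the configured timeout from milliseconds to seconds
            const timeout = Number(spAsrTimeoutMs || asrTimeoutMs);
            recognizer.asrTimeout = timeout / 1000 || undefined;
        } else if (asrEnabled === false || spAsrEnabled === false) {
            // new branch: explicitly disabling ASR now zeroes the timeout instead of leaving it unset
            recognizer.asrTimeout = 0;
        }
        return recognizer;
    }

    console.log(mapAsrTimeout(false, undefined)); // { asrTimeout: 0 }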
@@ -0,0 +1,75 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.getPrompt = exports.isOpenAIChatPrompt = exports.isChatPrompt = exports.isCompletionPrompt = void 0;
+ /** Custom Modules */
+ const generativeAIPrompts_1 = require("./utils/generativeAIPrompts");
+ const internalServerError_1 = require("../errors/internalServerError");
+ const isCompletionPrompt = (data) => {
+     return typeof data === "object" && data !== null && "prompt" in data;
+ };
+ exports.isCompletionPrompt = isCompletionPrompt;
+ const isChatPrompt = (data) => {
+     return typeof data === "object" && data !== null && "messages" in data;
+ };
+ exports.isChatPrompt = isChatPrompt;
+ const isOpenAIChatPrompt = (data) => {
+     return Array.isArray(data) &&
+         data.every((item) => typeof item === "object" &&
+             item !== null &&
+             ("role" in item) &&
+             ("content" in item) &&
+             (item.role === "system" || item.role === "user" || item.role === "assistant") &&
+             (typeof item.content === "string"));
+ };
+ exports.isOpenAIChatPrompt = isOpenAIChatPrompt;
+ /**
+  * Gets the current prompts for the passed model/useCase
+  * @param model - The model to get the prompt for
+  * @param useCase - The use case to get the prompt for
+  * @param subUseCase - Optional sub-use case to get a specific prompt
+  * @param promptParser - Optional function to modify the prompt before returning it
+  * @returns {TALLPrompts}
+  */
+ const getPrompt = (model, useCase, subUseCase, promptParser) => {
+     var _a;
+     const loggerMeta = {
+         module: "getPrompt.ts",
+         label: "generativeAI",
+         function: "getPrompt",
+         model,
+         useCase,
+         subUseCase
+     };
+     let modelPrompts = (_a = generativeAIPrompts_1.generativeAIPrompts[`${model}`]) !== null && _a !== void 0 ? _a : generativeAIPrompts_1.generativeAIPrompts["default"];
+     if (!modelPrompts) {
+         throw new internalServerError_1.InternalServerError(`Neither the model "${model}" nor the default fallback have predefined prompts`, undefined, loggerMeta);
+     }
+     let prompt = modelPrompts[`${useCase}`];
+     // generativeAIPrompts[model] has no prompt for use case, so try to fallback to default prompt
+     if (!prompt) {
+         modelPrompts = generativeAIPrompts_1.generativeAIPrompts["default"];
+         if (!modelPrompts) {
+             throw new internalServerError_1.InternalServerError(`The default fallback has no predefined prompts`, undefined, loggerMeta);
+         }
+         prompt = modelPrompts[`${useCase}`];
+     }
+     if (!prompt) {
+         throw new internalServerError_1.InternalServerError(`Neither the model "${model}" nor the default fallback define a prompt for useCase "${useCase}"`, undefined, loggerMeta);
+     }
+     if (subUseCase && prompt && typeof prompt === "object" && `${subUseCase}` in prompt) {
+         prompt = prompt[`${subUseCase}`];
+     }
+     if (!prompt) {
+         throw new internalServerError_1.InternalServerError(`The prompt defined for the model "${model}" or the default fallback, useCase "${useCase}", and subUseCase "${subUseCase}" is invalid`, undefined, loggerMeta);
+     }
+     try {
+         return promptParser
+             ? promptParser(JSON.parse(JSON.stringify(prompt)))
+             : JSON.parse(JSON.stringify(prompt));
+     }
+     catch (error) {
+         throw new internalServerError_1.InternalServerError(`Error while parsing prompt for model: ${model} and useCase: ${useCase} and subUseCase: ${subUseCase}`, undefined, Object.assign({ originalError: error }, loggerMeta));
+     }
+ };
+ exports.getPrompt = getPrompt;
+ //# sourceMappingURL=getPrompt.js.map
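
Editor's note: the new getPrompt helper above resolves a prompt per model and use case, falling back to a "default" entry, optionally drilling into a sub-use case, and deep-cloning before an optional promptParser runs. A usage-oriented TypeScript sketch with a hypothetical prompt table; only the lookup order and the clone-then-parse step mirror the code above.

    type OpenAIChatMessage = { role: "system" | "user" | "assistant"; content: string };
    type PromptTable = Record<string, Record<string, unknown>>;

    // Hypothetical table standing in for utils/generativeAIPrompts (the real contents ship with the package).
    const generativeAIPrompts: PromptTable = {
        default: {
            answerExtraction: {
                contextAwareUserQueryRephrasing: [
                    { role: "system", content: "Rephrase the user's question using the chat history." },
                ],
            },
        },
    };

    function getPrompt(model: string, useCase: string, subUseCase?: string, promptParser?: (raw: unknown) => unknown): unknown {
        // 1. model entry, falling back to "default"; 2. use case; 3. optional sub-use case
        const modelPrompts = generativeAIPrompts[model] ?? generativeAIPrompts["default"];
        let prompt: unknown = modelPrompts?.[useCase] ?? generativeAIPrompts["default"]?.[useCase];
        if (subUseCase && prompt && typeof prompt === "object" && subUseCase in prompt) {
            prompt = (prompt as Record<string, unknown>)[subUseCase];
        }
        if (!prompt) {
            throw new Error(`No prompt defined for ${model}/${useCase}/${subUseCase ?? ""}`);
        }
        // Deep-clone so a promptParser can mutate freely without touching the shared table.
        const clone = JSON.parse(JSON.stringify(prompt));
        return promptParser ? promptParser(clone) : clone;
    }

    // Example: fetch the rephrasing prompt for an unknown model (falls back to "default")
    // and append a user turn via a parser, as the Search Extract Output node does.
    const chatPrompt = getPrompt("gpt-4o", "answerExtraction", "contextAwareUserQueryRephrasing", (raw) =>
        (raw as OpenAIChatMessage[]).concat({ role: "user", content: "Is that a continent?" }),
    ) as OpenAIChatMessage[];
    console.log(chatPrompt.length); // 2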