@cognigy/rest-api-client 2025.16.0 → 2025.18.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. package/CHANGELOG.md +10 -0
  2. package/build/shared/charts/descriptors/data/code.js +1 -1
  3. package/build/shared/charts/descriptors/data/debugMessage.js +13 -3
  4. package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +48 -49
  5. package/build/shared/charts/descriptors/logic/goTo.js +2 -2
  6. package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +31 -2
  7. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +11 -2
  8. package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +12 -3
  9. package/build/shared/charts/descriptors/transcripts/getTranscript.js +23 -3
  10. package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +3 -0
  11. package/build/shared/errors/codes.js +2 -1
  12. package/build/shared/errors/invalidArgument.js +4 -0
  13. package/build/shared/errors/missingArgument.js +4 -0
  14. package/build/shared/generativeAI/getPrompt.js +75 -0
  15. package/build/shared/generativeAI/utils/generativeAIPrompts.js +479 -0
  16. package/build/shared/generativeAI/utils/prompts/contextAwareUserQueryRephrasing.js +84 -0
  17. package/build/shared/generativeAI/utils/prompts/rephraseSentences.js +86 -0
  18. package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +1 -0
  19. package/build/shared/interfaces/messageAPI/handover.js +6 -0
  20. package/build/shared/interfaces/resources/IKnowledgeDescriptor.js +9 -8
  21. package/build/shared/interfaces/resources/ILargeLanguageModel.js +1 -0
  22. package/build/shared/interfaces/resources/TResourceType.js +1 -0
  23. package/dist/esm/shared/charts/descriptors/data/code.js +1 -1
  24. package/dist/esm/shared/charts/descriptors/data/debugMessage.js +13 -3
  25. package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +48 -50
  26. package/dist/esm/shared/charts/descriptors/logic/goTo.js +2 -2
  27. package/dist/esm/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +29 -1
  28. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +11 -2
  29. package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +12 -3
  30. package/dist/esm/shared/charts/descriptors/transcripts/getTranscript.js +23 -3
  31. package/dist/esm/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +3 -0
  32. package/dist/esm/shared/errors/codes.js +1 -0
  33. package/dist/esm/shared/errors/invalidArgument.js +4 -0
  34. package/dist/esm/shared/errors/missingArgument.js +4 -0
  35. package/dist/esm/shared/generativeAI/getPrompt.js +68 -0
  36. package/dist/esm/shared/generativeAI/utils/generativeAIPrompts.js +476 -0
  37. package/dist/esm/shared/generativeAI/utils/prompts/contextAwareUserQueryRephrasing.js +81 -0
  38. package/dist/esm/shared/generativeAI/utils/prompts/rephraseSentences.js +83 -0
  39. package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +1 -0
  40. package/dist/esm/shared/interfaces/messageAPI/handover.js +6 -0
  41. package/dist/esm/shared/interfaces/resources/IKnowledgeDescriptor.js +10 -9
  42. package/dist/esm/shared/interfaces/resources/ILargeLanguageModel.js +1 -0
  43. package/dist/esm/shared/interfaces/resources/TResourceType.js +1 -0
  44. package/package.json +1 -1
  45. package/types/index.d.ts +96 -51
@@ -20,13 +20,13 @@ exports.knowledgeFieldTypes = [
   "daterange",
   "connection",
   "condition",
- "description"
+ "description",
   ];
   exports.knowledgeFieldSchema = {
   title: "knowledgeFieldSchema",
   type: "object",
   additionalProperties: false,
- properties: Object.assign(Object.assign({}, INodeDescriptorSet_1.nodeFieldSchema.properties), { type: { type: "string", enum: [...exports.knowledgeFieldTypes] }, key: { type: "string", minLength: 1, maxLength: 200 } })
+ properties: Object.assign(Object.assign({}, INodeDescriptorSet_1.nodeFieldSchema.properties), { type: { type: "string", enum: [...exports.knowledgeFieldTypes] }, key: { type: "string", minLength: 1, maxLength: 200 } }),
   };
   const { type, summary, defaultLabel, sections, form } = INodeDescriptorSet_1.nodeDescriptorSchema.properties;
   exports.knowledgeDescriptorSchema = {
@@ -39,15 +39,15 @@ exports.knowledgeDescriptorSchema = {
   summary,
   sections,
   form,
- fields: { type: "array", items: exports.knowledgeFieldSchema }
- }
+ fields: { type: "array", items: exports.knowledgeFieldSchema },
+ },
   };
   const filterNonConfigFields = ({ type }) => !["description"].includes(type);
   const buildConfigValidationSchema = (fields) => ({
   type: "object",
   additionalProperties: false,
   required: (fields || []).filter(filterNonConfigFields).map(({ key }) => key),
- properties: Object.assign({}, (fields || []).filter(filterNonConfigFields).reduce((result, field) => (Object.assign(Object.assign({}, result), { [field.key]: mapFieldToSchema(field) })), {}))
+ properties: Object.assign({}, (fields || []).filter(filterNonConfigFields).reduce((result, field) => (Object.assign(Object.assign({}, result), { [field.key]: mapFieldToSchema(field) })), {})),
   });
   exports.buildConfigValidationSchema = buildConfigValidationSchema;
   const mapFieldToSchema = ({ type, params }) => {
@@ -59,19 +59,20 @@ const mapFieldToSchema = ({ type, params }) => {
   case "slider":
   return { type: "number" };
   case "textArray":
+ case "chipInput":
   return {
   type: "array",
- minLength: (params === null || params === void 0 ? void 0 : params.required) ? 1 : 0
+ minLength: (params === null || params === void 0 ? void 0 : params.required) ? 1 : 0,
   };
   case "json":
   return {
   type: ["object", "array"],
- additionalProperties: true
+ additionalProperties: true,
   };
   default:
   return {
   type: "string",
- minLength: (params === null || params === void 0 ? void 0 : params.required) ? 1 : 0
+ minLength: (params === null || params === void 0 ? void 0 : params.required) ? 1 : 0,
   };
   }
   };
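For reference, a minimal sketch of the config validation schema that buildConfigValidationSchema now produces when a descriptor uses the new "chipInput" field type, assuming the exported helper above is in scope (the field keys are hypothetical):

    // Hypothetical descriptor fields; "chipInput" now validates like "textArray".
    const schema = buildConfigValidationSchema([
        { type: "chipInput", key: "tags", params: { required: true } },
        { type: "text", key: "title", params: {} },
    ]);
    // schema is roughly:
    // {
    //     type: "object",
    //     additionalProperties: false,
    //     required: ["tags", "title"],
    //     properties: {
    //         tags: { type: "array", minLength: 1 },
    //         title: { type: "string", minLength: 0 },
    //     },
    // }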
@@ -57,6 +57,7 @@ exports.openAICompatibleMetaSchema = {
   customModel: { type: "string" },
   baseCustomUrl: { type: "string" },
   customAuthHeader: { type: "string" },
+ embeddingVectorSize: { type: "number" },
   }
   };
   exports.azureOpenAIMetaSchema = {
@@ -159,6 +159,7 @@ exports.searchableResourceTypes = [
   "playbook",
   "project",
   "snapshot",
+ "simulation",
   ];
   exports.packageableResourceTypes = [
   "aiAgent",
@@ -27,7 +27,7 @@ export const CODE = createNodeDescriptor({
   function: (codeParams) => __awaiter(void 0, void 0, void 0, function* () {
   const { cognigy } = codeParams;
   const { api } = cognigy;
- api.executeCodeInSecureContext(codeParams);
+ yield api.executeCodeInSecureContext(codeParams);
   })
   });
   //# sourceMappingURL=code.js.map
@@ -58,12 +58,22 @@ export const DEBUG_MESSAGE = createNodeDescriptor({
   function: ({ cognigy, config }) => __awaiter(void 0, void 0, void 0, function* () {
   const { api } = cognigy;
   const { level, message, header } = config;
- if (level && message) {
+ let messageToOutput = message;
+ //Atp message can be of type any since cognigyScript can return any type
+ // whereas logDebugMessage expects a string or object
+ // so we need to change the type of message to string if not string or object
+ if (message === undefined || message === null) {
+ return;
+ }
+ else if (typeof message !== "string" && typeof message !== "object") {
+ messageToOutput = JSON.stringify(message);
+ }
+ if (level) {
   if (level === "info") {
- api.logDebugMessage(message, header);
+ api.logDebugMessage(messageToOutput, header);
   }
   if (level === "error") {
- api.logDebugError(message, header);
+ api.logDebugError(messageToOutput, header);
   }
   }
   })
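The new guard boils down to the following coercion before logging, shown here as a sketch that mirrors the diff above (not an exported helper of the package):

    function toLoggableMessage(message: unknown): string | object | undefined {
        if (message === undefined || message === null) {
            return undefined; // the node returns early and logs nothing
        }
        if (typeof message !== "string" && typeof message !== "object") {
            return JSON.stringify(message); // e.g. 42 -> "42", true -> "true"
        }
        return message; // strings and objects pass through unchanged
    }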
@@ -4,6 +4,7 @@ import { createNodeDescriptor } from "../../createNodeDescriptor";
   import { GO_TO } from "../logic";
   import { writeLLMDebugLogs } from "../nlu/generativeSlotFiller/prompt";
   import { randomUUID } from 'crypto';
+ import { isOpenAIChatPrompt } from "../../../generativeAI/getPrompt";
   import { InternalServerError } from "../../../errors";
   /**
   * Returns the simplified english name for a language given a language code
@@ -547,7 +548,7 @@ export const SEARCH_EXTRACT_OUTPUT = createNodeDescriptor({
   },
   tags: ["ai", "knowledgeSearch", "knowledge", "search"],
   function: (knowledgeSearchParams) => __awaiter(void 0, void 0, void 0, function* () {
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k;
   const { cognigy, config, nodeId } = knowledgeSearchParams;
   const { input, api } = cognigy;
   const { topK, searchString, searchStoreLocation, searchStoreLocationContextKey, searchStoreLocationInputKey, searchSourceTags, searchSourceTagsFilterOp, temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, timeoutMessage, outputFallback, outputMode, mode, errorHandling, errorHandlingGotoTarget, streamStopTokens, followUpDetection, debugLogTokenCount, debugLogRequestAndCompletion } = config;
@@ -574,53 +575,24 @@
   // check if follow up detection is active and if yes, handle accordingly
   // this is "context aware search"
   if (followUpDetection === "transcript") {
- let prompt;
- let lastRoundTrip;
- // this is a fallback in case the node was created before this function was added and followUpDetectionSteps is undefined
- followUpDetectionSteps = followUpDetectionSteps || 2;
   // check whether we're in an flow execution that's not the first
   // as it doesn't make sense to check for follow ups in the first execution
   if (input.execution > 1) {
+ // this is a fallback in case the node was created before this function was added and followUpDetectionSteps is undefined
+ followUpDetectionSteps = followUpDetectionSteps || 2;
   // always remember the last thing the user said (needed later)
- lastRoundTrip = (_b = cognigy
- .lastConversationEntries) === null || _b === void 0 ? void 0 : _b.slice(1, followUpDetectionSteps + 1).reverse().map(entry => "- " + (entry.source === "user" ? "USER: " : "BOT: ") + entry.text).join("\n");
- // if follow up detection is set to 2 or more, we use the conversation transcript
- // as reference. Start at the second entry, because the first one is the current
- const recentConversation = (_c = cognigy
- .lastConversationEntries) === null || _c === void 0 ? void 0 : _c.slice(1, followUpDetectionSteps + 1).reverse().map(entry => "- " + (entry.source === "user" ? "USER: " : "BOT: ") + entry.text).join("\n");
- prompt = `Below is the transcript of a conversation:
- ${recentConversation}
- USER: ${searchString}
- Does the last USER input refer to the conversation before?
- Answer with "true" or "false". Answer:`;
- let promptResponse;
- // set the detailed results to true to get the token usage
- const returnDetailedResults = true;
+ const chatHistory = (_b = cognigy
+ .lastConversationEntries) === null || _b === void 0 ? void 0 : _b.slice(0, followUpDetectionSteps + 1).reverse();
   try {
- const firstFollowUpResponse = yield api.runGenerativeAIPrompt({ prompt, detailedResults: returnDetailedResults }, "answerExtraction");
- promptResponse = firstFollowUpResponse.result;
+ const promptData = {
+ // set the detailed results to true to get the token usage
+ detailedResults: true
+ };
+ const rephrasedUserQueryResponse = yield api.runGenerativeAIPromptForUseCase(promptData, "answerExtraction", "contextAwareUserQueryRephrasing", getContextAwareUserQueryRephrasingPromptParser(chatHistory));
+ const promptResponse = rephrasedUserQueryResponse.result;
   // if we're in adminconsole, process debugging options
- writeLLMDebugLogs("Search Extract Output Follow Up Detection", prompt, firstFollowUpResponse, debugLogTokenCount, false, cognigy);
- // check if LLM thinks the input was a follow up
- if (promptResponse === null || promptResponse === void 0 ? void 0 : promptResponse.toLowerCase().includes("true")) {
- prompt = `You are tasked to rewrite a question based on a context, so that the question is clearer.
-
- Example:
- Context:
- USER: Where is Germany?
- BOT: Germany is in Europe.
- Question: Is that a continent?
- New: Is Europe a continent?
-
- Task:
- Context:
- ${lastRoundTrip}
- Question: ${searchString}
- New: `;
- const secondFollowUpResponse = yield api.runGenerativeAIPrompt({ prompt, detailedResults: returnDetailedResults }, "answerExtraction");
- promptResponse = secondFollowUpResponse.result;
- // if we're in adminconsole, process debugging options
- writeLLMDebugLogs("Search Extract Output Follow Up Detection 2", prompt, secondFollowUpResponse, debugLogTokenCount, false, cognigy);
+ writeLLMDebugLogs("Search Extract Output Follow Up Detection", prompt, rephrasedUserQueryResponse, debugLogTokenCount, false, cognigy);
+ if ((promptResponse === null || promptResponse === void 0 ? void 0 : promptResponse.toLowerCase()) !== "false") {
   // the actual search string to now use is the rewritten question
   actualSearchString = promptResponse;
   api.logDebugMessage(`UI__DEBUG_MODE__SEO__MESSAGE '${actualSearchString}'`);
@@ -634,7 +606,7 @@ New: `;
   let knowledgeSearchResponseData;
   // handle errors from external services, depending on the settings
   const handleServiceError = (error) => __awaiter(void 0, void 0, void 0, function* () {
- var _m;
+ var _l;
   const compactError = {
   message: (error === null || error === void 0 ? void 0 : error.message) || error,
   };
@@ -670,7 +642,7 @@ New: `;
   isSnapshotError: !!(metadata === null || metadata === void 0 ? void 0 : metadata.snapshotId),
   });
   }
- if ((_m = error === null || error === void 0 ? void 0 : error.originalErrorDetails) === null || _m === void 0 ? void 0 : _m.code) {
+ if ((_l = error === null || error === void 0 ? void 0 : error.originalErrorDetails) === null || _l === void 0 ? void 0 : _l.code) {
   compactError["code"] = error.originalErrorDetails.code;
   }
   let searchStoreDataWithError = {
@@ -743,7 +715,7 @@ New: `;
   // Perform knowledge search
   try {
   // Set understood to true so that the interaction doesn't look false in our analytics
- (_d = api.setAnalyticsData) === null || _d === void 0 ? void 0 : _d.call(api, "understood", "true");
+ (_c = api.setAnalyticsData) === null || _c === void 0 ? void 0 : _c.call(api, "understood", "true");
   input.understood = true;
   const knowledgeSearchResponse = yield api.knowledgeSearch(data);
   writeLLMDebugLogs("Search Extract Output Embeddings Call", data.query, undefined, debugLogTokenCount, false, cognigy);
@@ -776,7 +748,7 @@ New: `;
   }
   // #endregion 1 Perform Search
   // #region 2 Perform Answer Extraction
- let documents = (_e = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _e === void 0 ? void 0 : _e.map(result => result === null || result === void 0 ? void 0 : result.text).join(' ');
+ let documents = (_d = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _d === void 0 ? void 0 : _d.map(result => result === null || result === void 0 ? void 0 : result.text).join(' ');
   const replacedUserInput = input.text + (actualSearchString !== input.text ? ` possibly meaning "${actualSearchString}"` : "");
   prompt = prompt.replace(/@userInput/g, replacedUserInput);
   prompt = prompt.replace(/@foundDocuments/g, documents);
@@ -981,7 +953,7 @@ New: `;
   {
   "separator": true,
   "type": "TextBlock",
- "text": (_f = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _f === void 0 ? void 0 : _f[0].text,
+ "text": (_e = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _e === void 0 ? void 0 : _e[0].text,
   "wrap": true,
   "spacing": "Padding"
   }
@@ -1002,7 +974,7 @@ New: `;
   "version": "1.6"
   };
   // @ts-ignore
- if ((_j = (_h = (_g = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _g === void 0 ? void 0 : _g[0]) === null || _h === void 0 ? void 0 : _h.chunkMetaData) === null || _j === void 0 ? void 0 : _j.url) {
+ if ((_h = (_g = (_f = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _f === void 0 ? void 0 : _f[0]) === null || _g === void 0 ? void 0 : _g.chunkMetaData) === null || _h === void 0 ? void 0 : _h.url) {
   ADAPTIVE_CARD_RESULT.body[2].items[0].columns[1].items.push({
   "type": "ActionSet",
   "actions": [
@@ -1010,7 +982,7 @@ New: `;
   "type": "Action.OpenUrl",
   "title": "Open Source",
   // @ts-ignore
- "url": (_k = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _k === void 0 ? void 0 : _k[0].chunkMetaData.url
+ "url": (_j = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _j === void 0 ? void 0 : _j[0].chunkMetaData.url
   }
   ],
   "separator": true
@@ -1040,7 +1012,7 @@ New: `;
   yield api.output(promptResponse, null);
   }
   else if (mainPromptResponse.finishReason) {
- (_l = api.output) === null || _l === void 0 ? void 0 : _l.call(api, "", {
+ (_k = api.output) === null || _k === void 0 ? void 0 : _k.call(api, "", {
   _cognigy: {
   _preventTranscript: true,
   _messageId,
@@ -1065,4 +1037,30 @@ New: `;
   }
   })
   });
+ /**
+ * Parses the prompt for the context-aware user query rephrasing.
+ * It replaces the "@@chatHistory" variable with the chat history messages.
+ * It replaces the "@@userQuery" variable with the last user message.
+ *
+ * @param chatHistory - The chat history to be used for context.
+ * @return A function that takes a raw prompt and returns the modified prompt.
+ */
+ export function getContextAwareUserQueryRephrasingPromptParser(chatHistory) {
+ return (rawPrompt) => {
+ if (isOpenAIChatPrompt(rawPrompt)) {
+ const modifiedPrompt = [...rawPrompt];
+ for (const message of chatHistory) {
+ const role = message.source === "user" ? "user" : "assistant";
+ modifiedPrompt.push({
+ role,
+ content: message.text
+ });
+ }
+ return modifiedPrompt;
+ }
+ else {
+ throw new InternalServerError(`Invalid prompt type for context-aware user query rephrasing. Expected a chat prompt.`);
+ }
+ };
+ }
   //# sourceMappingURL=searchExtractOutput.js.map
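For reference, a minimal usage sketch of the exported parser above, assuming it is in scope; the history entries mirror the { source, text } shape of cognigy.lastConversationEntries and the sample messages are illustrative:

    const parser = getContextAwareUserQueryRephrasingPromptParser([
        { source: "user", text: "Where is Germany?" },
        { source: "bot", text: "Germany is in Europe." },
    ]);
    // The raw prompt must be an OpenAI-style chat prompt (an array of { role, content }),
    // otherwise the parser throws an InternalServerError.
    const modifiedPrompt = parser([
        { role: "system", content: "Rephrase the last user query using the conversation." },
    ]);
    // -> the system message followed by the history as "user"/"assistant" turns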
@@ -132,12 +132,12 @@ export const GO_TO = createNodeDescriptor({
   }
   if (!api.checkThink(thisNodeId)) {
   api.resetNextNodes();
- api.setThinkMarker(config.flowNode.flow);
+ api.setThinkMarker(flowId);
   // Check if execution is to wait at Node for Input
   if (executionMode === "wait") {
   // only set the next node if the execution mode is "wait",
   // otherwise input after the goto node would again go to this node, and not to the start node
- yield api.goToNode(config.flowNode);
+ yield api.goToNode({ flowNode: config.flowNode, absorbContext });
   return;
   }
   if (injectedText) {
@@ -151,6 +151,10 @@ export const writeLLMDebugLogs = (label, prompt, response, debugLogTokenCount, d
   completionTokenMessage = ` (${completionTokens} Tokens)`;
   }
   }
+ let promptString = prompt;
+ if (typeof prompt != "string") {
+ promptString = promptToString(prompt);
+ }
   let inputLabelKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__REQUEST";
   let headerKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER";
   if (nodeType === "llmPromptV2") {
@@ -158,7 +162,7 @@ export const writeLLMDebugLogs = (label, prompt, response, debugLogTokenCount, d
   headerKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER_WITH_SYSTEM_PROMPT";
   }
   ;
- api.logDebugMessage(`${inputLabelKey}${requestTokenMessage}:<br>${prompt}<br><br>UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__COMPLETION${completionTokenMessage}:<br>${responseOutputFormatted}`, headerKey);
+ api.logDebugMessage(`${inputLabelKey}${requestTokenMessage}:<br>${promptString}<br><br>UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__COMPLETION${completionTokenMessage}:<br>${responseOutputFormatted}`, headerKey);
   }
   catch (err) { }
   }
@@ -198,4 +202,28 @@ export const convertChatToPrompt = (chat) => {
   prompt += "assistant: ";
   return prompt;
   };
+ /**
+ * Converts a TALLPrompts object into a string representation.
+ * @param prompt The prompt to convert to a string
+ * @returns The string representation of the prompt
+ */
+ export function promptToString(prompt) {
+ if ("prompt" in prompt) {
+ // TCompletionPrompt
+ return prompt.prompt;
+ }
+ else if ("messages" in prompt) {
+ // TChatPrompt
+ return prompt.messages
+ .map((msg) => `[${msg.role}] ${msg.content}`)
+ .join("\n");
+ }
+ else if (Array.isArray(prompt)) {
+ // OpenAIChatMessage[]
+ return prompt
+ .map((msg) => `[${msg.role}] ${msg.content}`)
+ .join("\n");
+ }
+ return "";
+ }
   //# sourceMappingURL=prompt.js.map
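A minimal sketch of what the new promptToString helper returns for each supported prompt shape, assuming it is in scope (sample values are illustrative):

    promptToString({ prompt: "Summarize the conversation." });
    // -> "Summarize the conversation."

    promptToString({
        messages: [
            { role: "system", content: "You are a helpful bot." },
            { role: "user", content: "Hi!" },
        ],
    });
    // -> "[system] You are a helpful bot.\n[user] Hi!"

    promptToString([{ role: "user", content: "Hi!" }]);
    // -> "[user] Hi!"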
@@ -408,6 +408,13 @@ export const AI_AGENT_JOB = createNodeDescriptor({
   step: 0.1
   }
   },
+ {
+ key: "useTextAlternativeForLLM",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__INCLUDE_ALL_OUTPUT_TYPES__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__INCLUDE_ALL_OUTPUT_TYPES__DESCRIPTION",
+ defaultValue: true,
+ },
   {
   key: "logErrorToSystem",
   label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_TO_SYSTEM__LABEL",
@@ -803,6 +810,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
   "timeoutInMs",
   "maxTokens",
   "temperature",
+ "useTextAlternativeForLLM",
   ],
   },
   {
@@ -850,7 +858,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
   function: ({ cognigy, config, childConfigs, nodeId }) => __awaiter(void 0, void 0, void 0, function* () {
   var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23;
   const { api, context, input, profile, flowReferenceId } = cognigy;
- const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, debugLogLLMLatency, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
+ const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, useTextAlternativeForLLM, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, debugLogLLMLatency, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
   try {
   if (!aiAgent) {
   throw new Error("Could not resolve AI Agent reference in AI Agent Node");
@@ -1128,7 +1136,8 @@ export const AI_AGENT_JOB = createNodeDescriptor({
   const transcript = yield api.getTranscript({
   limit: 50,
   rolesWhiteList: [TranscriptRole.USER, TranscriptRole.ASSISTANT, TranscriptRole.TOOL],
- excludeDataOnlyMessagesFilter: [TranscriptRole.ASSISTANT]
+ excludeDataOnlyMessagesFilter: [TranscriptRole.ASSISTANT],
+ useTextAlternativeForLLM,
   });
   // For knowledgeSearch "always", we enhance the user input with the knowledge search response data
   if (knowledgeSearchBehavior === "always" &&
@@ -388,6 +388,13 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
   ]
   }
   },
+ {
+ key: "useTextAlternativeForLLM",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__INCLUDE_ALL_OUTPUT_TYPES__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__INCLUDE_ALL_OUTPUT_TYPES__DESCRIPTION",
+ defaultValue: true,
+ },
   {
   key: "customModelOptions",
   label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_MODEL_OPTIONS__LABEL",
@@ -539,7 +546,8 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
   "frequencyPenalty",
   "useStop",
   "stop",
- "seed"
+ "seed",
+ "useTextAlternativeForLLM",
   ]
   },
   {
@@ -628,7 +636,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
   var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
   const { api, input, flowReferenceId } = cognigy;
   const { temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, streamStopTokens, streamStopTokenOverrides, debugLogTokenCount, debugLogRequestAndCompletion, debugLogLLMLatency, debugLogToolDefinitions, llmProviderReferenceId, usePromptMode, chatTranscriptSteps, responseFormat, streamStoreCopyInInput, seed, immediateOutput, customModelOptions, customRequestOptions, errorHandling = "continue", // default behavior for LLM Prompt node was, continue its execution even though an error occurred (deviating it from the SEO node) & do not output an error message on UI explicitly. However, error is always stored in the input or context object. We can use an extra "say" node to output it.
- errorHandlingGotoTarget, errorMessage, logErrorToSystem, processImages, transcriptImageHandling, toolChoice, useStrict } = config;
+ errorHandlingGotoTarget, errorMessage, useTextAlternativeForLLM, logErrorToSystem, processImages, transcriptImageHandling, toolChoice, useStrict } = config;
   let prompt = config.prompt || "";
   const { traceId } = input;
   // check if custom variables are used and if they have a length modifier
@@ -773,7 +781,8 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
   const transcript = yield api.getTranscript({
   limit: chatTranscriptSteps || 50,
   rolesWhiteList: [TranscriptRole.USER, TranscriptRole.ASSISTANT, TranscriptRole.TOOL],
- excludeDataOnlyMessagesFilter: [TranscriptRole.ASSISTANT]
+ excludeDataOnlyMessagesFilter: [TranscriptRole.ASSISTANT],
+ useTextAlternativeForLLM,
   });
   llmPromptOptions["transcript"] = transcript;
   llmPromptOptions["chat"] = [{
@@ -68,6 +68,13 @@ export const GET_TRANSCRIPT = createNodeDescriptor({
   value: "context",
   }
   },
+ {
+ key: "includeTextAlternativeInTranscript",
+ type: "toggle",
+ label: "UI__NODE_EDITOR__GET_TRANSCRIPT__FIELDS__INCLUDE_TEXT_ALTERNATIVE_IN_TRANSCRIPT__LABEL",
+ description: "UI__NODE_EDITOR__GET_TRANSCRIPT__FIELDS__INCLUDE_TEXT_ALTERNATIVE_IN_TRANSCRIPT__DESCRIPTION",
+ defaultValue: true,
+ },
   ],
   sections: [
   {
@@ -79,18 +86,31 @@
   "inputKey",
   "contextKey",
   ]
- }
+ },
+ {
+ key: "advanced",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__ADVANCED__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "includeTextAlternativeInTranscript",
+ ],
+ },
   ],
   form: [
   { type: "field", key: "limit" },
   { type: "section", key: "storage" },
+ { type: "section", key: "advanced" },
   ],
   tags: ["service", "transcripts"],
   function: ({ cognigy, config }) => __awaiter(void 0, void 0, void 0, function* () {
   var _a;
- const { limit, storeLocation, inputKey, contextKey } = config;
+ const { limit, storeLocation, inputKey, contextKey, includeTextAlternativeInTranscript } = config;
   const { api } = cognigy;
- const transcript = yield api.getTranscript({ limit, excludeDataOnlyMessagesFilter: [TranscriptRole.AGENT, TranscriptRole.ASSISTANT] });
+ const transcript = yield api.getTranscript({
+ limit,
+ excludeDataOnlyMessagesFilter: [TranscriptRole.AGENT],
+ includeTextAlternativeInTranscript,
+ });
   if (storeLocation === "context") {
   (_a = api.addToContext) === null || _a === void 0 ? void 0 : _a.call(api, contextKey, transcript, "simple");
   }
@@ -143,6 +143,9 @@ class SessionConfigMapper extends BaseMapper {
   const timeout = Number(spAsrTimeout || asrTimeout);
   recognizer.asrTimeout = timeout / 1000 || undefined;
   }
+ else if (asrEnabled === false || spAsrEnabled === false) {
+ recognizer.asrTimeout = 0;
+ }
   return recognizer;
   }
   isDtmfEnabled(sessionParams, dtmf) {
@@ -1,4 +1,5 @@
   import { ErrorCode } from "./ErrorCode";
+ export const BAD_REQUEST_ERROR = ErrorCode.BAD_REQUEST;
   export const MISSING_ARGUMENT_ERROR = ErrorCode.MISSING_ARGUMENT_ERROR;
   export const DATABASE_WRITE_ERROR = ErrorCode.DATABASE_WRITE_ERROR;
   export const RESOURCE_NOT_FOUND_ERROR = ErrorCode.RESOURCE_NOT_FOUND_ERROR;
@@ -7,5 +7,9 @@ export class InvalidArgumentError extends BadRequestError {
   this.name = "Invalid Argument Error";
   this.code = ErrorCode.INVALID_ARGUMENT_ERROR;
   }
+ toRFC7807Response(data) {
+ const baseResponse = super.toRFC7807Response(data);
+ return Object.assign(Object.assign({}, baseResponse), { type: "Bad Request" });
+ }
   }
   //# sourceMappingURL=invalidArgument.js.map
@@ -7,5 +7,9 @@ export class MissingArgumentError extends BadRequestError {
   this.name = "Missing Argument Error";
   this.code = ErrorCode.MISSING_ARGUMENT_ERROR;
   }
+ toRFC7807Response(data) {
+ const baseResponse = super.toRFC7807Response(data);
+ return Object.assign(Object.assign({}, baseResponse), { type: "Bad Request" });
+ }
   }
   //# sourceMappingURL=missingArgument.js.map
@@ -0,0 +1,68 @@
+ /** Custom Modules */
+ import { generativeAIPrompts } from "./utils/generativeAIPrompts";
+ import { InternalServerError } from "../errors/internalServerError";
+ export const isCompletionPrompt = (data) => {
+ return typeof data === "object" && data !== null && "prompt" in data;
+ };
+ export const isChatPrompt = (data) => {
+ return typeof data === "object" && data !== null && "messages" in data;
+ };
+ export const isOpenAIChatPrompt = (data) => {
+ return Array.isArray(data) &&
+ data.every((item) => typeof item === "object" &&
+ item !== null &&
+ ("role" in item) &&
+ ("content" in item) &&
+ (item.role === "system" || item.role === "user" || item.role === "assistant") &&
+ (typeof item.content === "string"));
+ };
+ /**
+ * Gets the current prompts for the passed model/useCase
+ * @param model - The model to get the prompt for
+ * @param useCase - The use case to get the prompt for
+ * @param subUseCase - Optional sub-use case to get a specific prompt
+ * @param promptParser - Optional function to modify the prompt before returning it
+ * @returns {TALLPrompts}
+ */
+ export const getPrompt = (model, useCase, subUseCase, promptParser) => {
+ var _a;
+ const loggerMeta = {
+ module: "getPrompt.ts",
+ label: "generativeAI",
+ function: "getPrompt",
+ model,
+ useCase,
+ subUseCase
+ };
+ let modelPrompts = (_a = generativeAIPrompts[`${model}`]) !== null && _a !== void 0 ? _a : generativeAIPrompts["default"];
+ if (!modelPrompts) {
+ throw new InternalServerError(`Neither the model "${model}" nor the default fallback have predefined prompts`, undefined, loggerMeta);
+ }
+ let prompt = modelPrompts[`${useCase}`];
+ // generativeAIPrompts[model] has no prompt for use case, so try to fallback to default prompt
+ if (!prompt) {
+ modelPrompts = generativeAIPrompts["default"];
+ if (!modelPrompts) {
+ throw new InternalServerError(`The default fallback has no predefined prompts`, undefined, loggerMeta);
+ }
+ prompt = modelPrompts[`${useCase}`];
+ }
+ if (!prompt) {
+ throw new InternalServerError(`Neither the model "${model}" nor the default fallback define a prompt for useCase "${useCase}"`, undefined, loggerMeta);
+ }
+ if (subUseCase && prompt && typeof prompt === "object" && `${subUseCase}` in prompt) {
+ prompt = prompt[`${subUseCase}`];
+ }
+ if (!prompt) {
+ throw new InternalServerError(`The prompt defined for the model "${model}" or the default fallback, useCase "${useCase}", and subUseCase "${subUseCase}" is invalid`, undefined, loggerMeta);
+ }
+ try {
+ return promptParser
+ ? promptParser(JSON.parse(JSON.stringify(prompt)))
+ : JSON.parse(JSON.stringify(prompt));
+ }
+ catch (error) {
+ throw new InternalServerError(`Error while parsing prompt for model: ${model} and useCase: ${useCase} and subUseCase: ${subUseCase}`, undefined, Object.assign({ originalError: error }, loggerMeta));
+ }
+ };
+ //# sourceMappingURL=getPrompt.js.map
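For reference, a minimal usage sketch of the new getPrompt helper, assuming it and isOpenAIChatPrompt are in scope; the model key is illustrative (unknown models fall back to the "default" prompt set) and the optional promptParser receives a deep copy of the stored prompt:

    const rawPrompt = getPrompt(
        "gpt-4o",                           // illustrative model key
        "answerExtraction",                 // use case
        "contextAwareUserQueryRephrasing",  // optional sub-use case
        (prompt) => {
            // optional promptParser: modify the copied prompt before it is returned
            if (isOpenAIChatPrompt(prompt)) {
                prompt.push({ role: "user", content: "Is that a continent?" });
            }
            return prompt;
        },
    );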