@cognigy/rest-api-client 2025.15.1 → 2025.17.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76) hide show
  1. package/CHANGELOG.md +15 -0
  2. package/build/apigroups/MetricsAPIGroup_2_0.js +10 -0
  3. package/build/apigroups/ResourcesAPIGroup_2_0.js +6 -0
  4. package/build/apigroups/SimulationAPIGroup_2_0.js +4 -0
  5. package/build/shared/charts/descriptors/data/debugMessage.js +13 -3
  6. package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +48 -49
  7. package/build/shared/charts/descriptors/logic/if/if.js +2 -2
  8. package/build/shared/charts/descriptors/logic/switch/switch.js +30 -21
  9. package/build/shared/charts/descriptors/message/question/question.js +3 -3
  10. package/build/shared/charts/descriptors/message/question/utils/validateQuestionAnswer.js +2 -2
  11. package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +31 -2
  12. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +40 -24
  13. package/build/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +4 -4
  14. package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +36 -21
  15. package/build/shared/charts/descriptors/transcripts/addTranscriptStep.js +3 -3
  16. package/build/shared/charts/descriptors/transcripts/getTranscript.js +23 -3
  17. package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +3 -0
  18. package/build/shared/generativeAI/getPrompt.js +75 -0
  19. package/build/shared/generativeAI/utils/generativeAIPrompts.js +613 -0
  20. package/build/shared/generativeAI/utils/prompts/contextAwareUserQueryRephrasing.js +84 -0
  21. package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +2 -0
  22. package/build/shared/interfaces/messageAPI/handover.js +6 -0
  23. package/build/shared/interfaces/resources/IGetAiAgentJobsTools.js +3 -0
  24. package/build/shared/interfaces/resources/IKnowledgeDescriptor.js +38 -5
  25. package/build/shared/interfaces/resources/ILargeLanguageModel.js +16 -1
  26. package/build/shared/interfaces/restAPI/metrics/callCounter/v3.0/ICallCounterPreAggregatedValue_3_0.js +3 -0
  27. package/build/shared/interfaces/restAPI/metrics/callCounter/v3.0/IGetCallCounterOrganisationRest_3_0.js +3 -0
  28. package/build/shared/interfaces/restAPI/metrics/callCounter/v3.0/IGetCallCounterRest_3_0.js +3 -0
  29. package/build/shared/interfaces/restAPI/metrics/callCounter/v3.0/index.js +3 -0
  30. package/build/shared/interfaces/restAPI/resources/aiAgent/v2.0/IAiAgentJobNodeWithTools_2_0.js +65 -0
  31. package/build/shared/interfaces/restAPI/resources/aiAgent/v2.0/IGetAiAgentJobAndToolsRest_2_0 .js +4 -0
  32. package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/IIndexKnowledgeDescriptorsRest_2_0.js +3 -0
  33. package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/extension/IRunKnowledgeExtensionRest_2_0.js +3 -0
  34. package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/index.js +16 -0
  35. package/build/shared/interfaces/restAPI/simulation/simulationRunBatch/IStopSimulationRunBatchRest_2_0.js +3 -0
  36. package/build/shared/interfaces/security/ICallCounterPreAggregatedValue.js +3 -0
  37. package/build/test.js +39 -0
  38. package/dist/esm/apigroups/MetricsAPIGroup_2_0.js +10 -0
  39. package/dist/esm/apigroups/ResourcesAPIGroup_2_0.js +6 -0
  40. package/dist/esm/apigroups/SimulationAPIGroup_2_0.js +4 -0
  41. package/dist/esm/shared/charts/descriptors/data/debugMessage.js +13 -3
  42. package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +48 -50
  43. package/dist/esm/shared/charts/descriptors/logic/if/if.js +2 -2
  44. package/dist/esm/shared/charts/descriptors/logic/switch/switch.js +30 -21
  45. package/dist/esm/shared/charts/descriptors/message/question/question.js +3 -3
  46. package/dist/esm/shared/charts/descriptors/message/question/utils/validateQuestionAnswer.js +4 -3
  47. package/dist/esm/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +29 -1
  48. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +39 -23
  49. package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +4 -4
  50. package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +43 -28
  51. package/dist/esm/shared/charts/descriptors/transcripts/addTranscriptStep.js +3 -3
  52. package/dist/esm/shared/charts/descriptors/transcripts/getTranscript.js +23 -3
  53. package/dist/esm/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +3 -0
  54. package/dist/esm/shared/generativeAI/getPrompt.js +68 -0
  55. package/dist/esm/shared/generativeAI/utils/generativeAIPrompts.js +610 -0
  56. package/dist/esm/shared/generativeAI/utils/prompts/contextAwareUserQueryRephrasing.js +81 -0
  57. package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +2 -0
  58. package/dist/esm/shared/interfaces/messageAPI/handover.js +6 -0
  59. package/dist/esm/shared/interfaces/resources/IGetAiAgentJobsTools.js +2 -0
  60. package/dist/esm/shared/interfaces/resources/IKnowledgeDescriptor.js +37 -5
  61. package/dist/esm/shared/interfaces/resources/ILargeLanguageModel.js +14 -0
  62. package/dist/esm/shared/interfaces/restAPI/management/authentication/ICreateJWTToken.js +1 -0
  63. package/dist/esm/shared/interfaces/restAPI/metrics/callCounter/v3.0/ICallCounterPreAggregatedValue_3_0.js +2 -0
  64. package/dist/esm/shared/interfaces/restAPI/metrics/callCounter/v3.0/IGetCallCounterOrganisationRest_3_0.js +2 -0
  65. package/dist/esm/shared/interfaces/restAPI/metrics/callCounter/v3.0/IGetCallCounterRest_3_0.js +2 -0
  66. package/dist/esm/shared/interfaces/restAPI/metrics/callCounter/v3.0/index.js +2 -0
  67. package/dist/esm/shared/interfaces/restAPI/resources/aiAgent/v2.0/IAiAgentJobNodeWithTools_2_0.js +65 -0
  68. package/dist/esm/shared/interfaces/restAPI/resources/aiAgent/v2.0/IGetAiAgentJobAndToolsRest_2_0 .js +3 -0
  69. package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/IIndexKnowledgeDescriptorsRest_2_0.js +2 -0
  70. package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/extension/IRunKnowledgeExtensionRest_2_0.js +2 -0
  71. package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/index.js +2 -1
  72. package/dist/esm/shared/interfaces/restAPI/simulation/simulationRunBatch/IStopSimulationRunBatchRest_2_0.js +2 -0
  73. package/dist/esm/shared/interfaces/security/ICallCounterPreAggregatedValue.js +2 -0
  74. package/dist/esm/test.js +39 -0
  75. package/package.json +1 -1
  76. package/types/index.d.ts +299 -42
package/CHANGELOG.md CHANGED
@@ -1,8 +1,23 @@
1
+ # 2025.17.0
2
+ Released: August 21st, 2025
3
+
4
+ Released state of package up to date with Cognigy.AI v2025.17.0
5
+
6
+ # 2025.16.0
7
+ Released: August 05th, 2025
8
+
9
+ Released state of package up to date with Cognigy.AI v2025.16.0
10
+
1
11
  # 2025.15.1
2
12
  Released: July 24th, 2025
3
13
 
4
14
  Released state of package up to date with Cognigy.AI v2025.15.1
5
15
 
16
+ # 2025.15.0
17
+ Released: July 22nd, 2025
18
+
19
+ Released state of package up to date with Cognigy.AI v2025.15.0
20
+
6
21
  # 2025.14.0
7
22
  Released: July 08th, 2025
8
23
 
@@ -64,6 +64,16 @@ function MetricsAPIGroup_2_0(instance) {
64
64
  return (0, GenericAPIFn_1.GenericAPIFn)(`/new/v2.0/projects/${projectId}/conversationcounter?${(0, query_1.stringifyQuery)(args)}`, "GET", self)(undefined, options);
65
65
  },
66
66
  getConversationCounterOrganisation: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)(`/new/v2.0/conversationcounter?${(0, query_1.stringifyQuery)(args)}`, "GET", self)(undefined, options),
67
+ getPreAggregatedConversationCounter: (_a, options) => {
68
+ var { projectId } = _a, args = __rest(_a, ["projectId"]);
69
+ return (0, GenericAPIFn_1.GenericAPIFn)(`/new/v3.0/projects/${projectId}/conversationcounter?${(0, query_1.stringifyQuery)(args)}`, "GET", self)(undefined, options);
70
+ },
71
+ getPreAggregatedConversationCounterOrganisation: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)(`/new/v3.0/conversationcounter?${(0, query_1.stringifyQuery)(args)}`, "GET", self)(undefined, options),
72
+ getPreAggregatedCallCounter: (_a, options) => {
73
+ var { projectId } = _a, args = __rest(_a, ["projectId"]);
74
+ return (0, GenericAPIFn_1.GenericAPIFn)(`/new/v3.0/projects/${projectId}/callcounter?${(0, query_1.stringifyQuery)(args)}`, "GET", self)(undefined, options);
75
+ },
76
+ getPreAggregatedCallCounterOrganisation: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)(`/new/v3.0/callcounter?${(0, query_1.stringifyQuery)(args)}`, "GET", self)(undefined, options),
67
77
  getKnowledgeQueryCounter: (_a, options) => {
68
78
  var { projectId } = _a, args = __rest(_a, ["projectId"]);
69
79
  return (0, GenericAPIFn_1.GenericAPIFn)(`/new/v2.0/projects/${projectId}/knowledgequerycounter?${(0, query_1.stringifyQuery)(args)}`, "GET", self)(undefined, options);
@@ -559,6 +559,7 @@ const ResourcesAPIGroup_2_0 = (instance) => {
559
559
  validateAiAgentName: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)("/new/v2.0/aiagents/validatename", "POST", self)(args, options),
560
560
  getAiAgentHiringTemplates: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)("/new/v2.0/aiagents/hire/templates", "GET", self)(args, options),
561
561
  hireAiAgent: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)("/new/v2.0/aiagents/hire", "POST", self)(args, options),
562
+ getAiAgentJobsAndTools: ({ aiAgentId }, options) => (0, GenericAPIFn_1.GenericAPIFn)(`/new/v2.0/aiagents/${aiAgentId}/jobs`, "GET", self)(undefined, options),
562
563
  generateNodeOutput(_a, options) {
563
564
  var { flowId } = _a, args = __rest(_a, ["flowId"]);
564
565
  return (0, GenericAPIFn_1.GenericAPIFn)(`/new/v2.0/flows/${flowId}/chart/nodes/output/generate`, "POST", self)(args, options);
@@ -591,6 +592,11 @@ const ResourcesAPIGroup_2_0 = (instance) => {
591
592
  var { knowledgeStoreId } = _a, args = __rest(_a, ["knowledgeStoreId"]);
592
593
  return (0, GenericAPIFn_1.GenericAPIFn)(`/new/v2.0/knowledgestores/${knowledgeStoreId}`, "PATCH", self)(args, options);
593
594
  },
595
+ runKnowledgeExtension: (_a, options) => {
596
+ var { knowledgeStoreId } = _a, args = __rest(_a, ["knowledgeStoreId"]);
597
+ return (0, GenericAPIFn_1.GenericAPIFn)(`/new/v2.0/knowledgestores/${knowledgeStoreId}/extensions/run`, "POST", self)(args, options);
598
+ },
599
+ indexKnowledgeDescriptors: ({ knowledgeStoreId }, options) => (0, GenericAPIFn_1.GenericAPIFn)(`/new/v2.0/knowledgestores/${knowledgeStoreId}/descriptors`, "GET", self)(undefined, options),
594
600
  indexKnowledgeSources: (_a, options) => {
595
601
  var { knowledgeStoreId } = _a, args = __rest(_a, ["knowledgeStoreId"]);
596
602
  return (0, GenericAPIFn_1.GenericAPIFn)(`/new/v2.0/knowledgestores/${knowledgeStoreId}/sources?${(0, query_1.stringifyQuery)(args)}`, "GET", self)(undefined, options);
@@ -44,6 +44,10 @@ function SimulationAPIGroup_2_0(instance) {
44
44
  var { simulationReference, simulationRunBatchReference } = _a, args = __rest(_a, ["simulationReference", "simulationRunBatchReference"]);
45
45
  return (0, GenericAPIFn_1.GenericAPIFn)(`/testing/beta/simulations/${simulationReference}/batches/${simulationRunBatchReference}?${(0, query_1.stringifyQuery)(args)}`, "GET", self)(undefined, options);
46
46
  },
47
+ stopSimulationRunBatch: (_a, options) => {
48
+ var { simulationReference, simulationRunBatchReference } = _a, args = __rest(_a, ["simulationReference", "simulationRunBatchReference"]);
49
+ return (0, GenericAPIFn_1.GenericAPIFn)(`/testing/beta/simulations/${simulationReference}/batches/${simulationRunBatchReference}/stop?${(0, query_1.stringifyQuery)(args)}`, "POST", self)(undefined, options);
50
+ },
47
51
  indexSimulationRuns: (_a, options) => {
48
52
  var { simulationReference, simulationRunBatchReference } = _a, args = __rest(_a, ["simulationReference", "simulationRunBatchReference"]);
49
53
  return (0, GenericAPIFn_1.GenericAPIFn)(`/testing/beta/simulations/${simulationReference}/batches/${simulationRunBatchReference}/runs?${(0, query_1.stringifyQuery)(args)}`, "GET", self)(undefined, options);
@@ -60,12 +60,22 @@ exports.DEBUG_MESSAGE = (0, createNodeDescriptor_1.createNodeDescriptor)({
60
60
  function: async ({ cognigy, config }) => {
61
61
  const { api } = cognigy;
62
62
  const { level, message, header } = config;
63
- if (level && message) {
63
+ let messageToOutput = message;
64
+ //Atp message can be of type any since cognigyScript can return any type
65
+ // whereas logDebugMessage expects a string or object
66
+ // so we need to change the type of message to string if not string or object
67
+ if (message === undefined || message === null) {
68
+ return;
69
+ }
70
+ else if (typeof message !== "string" && typeof message !== "object") {
71
+ messageToOutput = JSON.stringify(message);
72
+ }
73
+ if (level) {
64
74
  if (level === "info") {
65
- api.logDebugMessage(message, header);
75
+ api.logDebugMessage(messageToOutput, header);
66
76
  }
67
77
  if (level === "error") {
68
- api.logDebugError(message, header);
78
+ api.logDebugError(messageToOutput, header);
69
79
  }
70
80
  }
71
81
  }
@@ -1,11 +1,12 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.SEARCH_EXTRACT_OUTPUT = void 0;
3
+ exports.getContextAwareUserQueryRephrasingPromptParser = exports.SEARCH_EXTRACT_OUTPUT = void 0;
4
4
  /* Custom modules */
5
5
  const createNodeDescriptor_1 = require("../../createNodeDescriptor");
6
6
  const logic_1 = require("../logic");
7
7
  const prompt_1 = require("../nlu/generativeSlotFiller/prompt");
8
8
  const crypto_1 = require("crypto");
9
+ const getPrompt_1 = require("../../../generativeAI/getPrompt");
9
10
  const errors_1 = require("../../../errors");
10
11
  /**
11
12
  * Returns the simplified english name for a language given a language code
@@ -549,7 +550,7 @@ exports.SEARCH_EXTRACT_OUTPUT = (0, createNodeDescriptor_1.createNodeDescriptor)
549
550
  },
550
551
  tags: ["ai", "knowledgeSearch", "knowledge", "search"],
551
552
  function: async (knowledgeSearchParams) => {
552
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l;
553
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k;
553
554
  const { cognigy, config, nodeId } = knowledgeSearchParams;
554
555
  const { input, api } = cognigy;
555
556
  const { topK, searchString, searchStoreLocation, searchStoreLocationContextKey, searchStoreLocationInputKey, searchSourceTags, searchSourceTagsFilterOp, temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, timeoutMessage, outputFallback, outputMode, mode, errorHandling, errorHandlingGotoTarget, streamStopTokens, followUpDetection, debugLogTokenCount, debugLogRequestAndCompletion } = config;
@@ -576,53 +577,24 @@ exports.SEARCH_EXTRACT_OUTPUT = (0, createNodeDescriptor_1.createNodeDescriptor)
576
577
  // check if follow up detection is active and if yes, handle accordingly
577
578
  // this is "context aware search"
578
579
  if (followUpDetection === "transcript") {
579
- let prompt;
580
- let lastRoundTrip;
581
- // this is a fallback in case the node was created before this function was added and followUpDetectionSteps is undefined
582
- followUpDetectionSteps = followUpDetectionSteps || 2;
583
580
  // check whether we're in an flow execution that's not the first
584
581
  // as it doesn't make sense to check for follow ups in the first execution
585
582
  if (input.execution > 1) {
583
+ // this is a fallback in case the node was created before this function was added and followUpDetectionSteps is undefined
584
+ followUpDetectionSteps = followUpDetectionSteps || 2;
586
585
  // always remember the last thing the user said (needed later)
587
- lastRoundTrip = (_b = cognigy
588
- .lastConversationEntries) === null || _b === void 0 ? void 0 : _b.slice(1, followUpDetectionSteps + 1).reverse().map(entry => "- " + (entry.source === "user" ? "USER: " : "BOT: ") + entry.text).join("\n");
589
- // if follow up detection is set to 2 or more, we use the conversation transcript
590
- // as reference. Start at the second entry, because the first one is the current
591
- const recentConversation = (_c = cognigy
592
- .lastConversationEntries) === null || _c === void 0 ? void 0 : _c.slice(1, followUpDetectionSteps + 1).reverse().map(entry => "- " + (entry.source === "user" ? "USER: " : "BOT: ") + entry.text).join("\n");
593
- prompt = `Below is the transcript of a conversation:
594
- ${recentConversation}
595
- USER: ${searchString}
596
- Does the last USER input refer to the conversation before?
597
- Answer with "true" or "false". Answer:`;
598
- let promptResponse;
599
- // set the detailed results to true to get the token usage
600
- const returnDetailedResults = true;
586
+ const chatHistory = (_b = cognigy
587
+ .lastConversationEntries) === null || _b === void 0 ? void 0 : _b.slice(0, followUpDetectionSteps + 1).reverse();
601
588
  try {
602
- const firstFollowUpResponse = await api.runGenerativeAIPrompt({ prompt, detailedResults: returnDetailedResults }, "answerExtraction");
603
- promptResponse = firstFollowUpResponse.result;
589
+ const promptData = {
590
+ // set the detailed results to true to get the token usage
591
+ detailedResults: true
592
+ };
593
+ const rephrasedUserQueryResponse = await api.runGenerativeAIPromptForUseCase(promptData, "answerExtraction", "contextAwareUserQueryRephrasing", getContextAwareUserQueryRephrasingPromptParser(chatHistory));
594
+ const promptResponse = rephrasedUserQueryResponse.result;
604
595
  // if we're in adminconsole, process debugging options
605
- (0, prompt_1.writeLLMDebugLogs)("Search Extract Output Follow Up Detection", prompt, firstFollowUpResponse, debugLogTokenCount, false, cognigy);
606
- // check if LLM thinks the input was a follow up
607
- if (promptResponse === null || promptResponse === void 0 ? void 0 : promptResponse.toLowerCase().includes("true")) {
608
- prompt = `You are tasked to rewrite a question based on a context, so that the question is clearer.
609
-
610
- Example:
611
- Context:
612
- USER: Where is Germany?
613
- BOT: Germany is in Europe.
614
- Question: Is that a continent?
615
- New: Is Europe a continent?
616
-
617
- Task:
618
- Context:
619
- ${lastRoundTrip}
620
- Question: ${searchString}
621
- New: `;
622
- const secondFollowUpResponse = await api.runGenerativeAIPrompt({ prompt, detailedResults: returnDetailedResults }, "answerExtraction");
623
- promptResponse = secondFollowUpResponse.result;
624
- // if we're in adminconsole, process debugging options
625
- (0, prompt_1.writeLLMDebugLogs)("Search Extract Output Follow Up Detection 2", prompt, secondFollowUpResponse, debugLogTokenCount, false, cognigy);
596
+ (0, prompt_1.writeLLMDebugLogs)("Search Extract Output Follow Up Detection", prompt, rephrasedUserQueryResponse, debugLogTokenCount, false, cognigy);
597
+ if ((promptResponse === null || promptResponse === void 0 ? void 0 : promptResponse.toLowerCase()) !== "false") {
626
598
  // the actual search string to now use is the rewritten question
627
599
  actualSearchString = promptResponse;
628
600
  api.logDebugMessage(`UI__DEBUG_MODE__SEO__MESSAGE '${actualSearchString}'`);
@@ -745,7 +717,7 @@ New: `;
745
717
  // Perform knowledge search
746
718
  try {
747
719
  // Set understood to true so that the interaction doesn't look false in our analytics
748
- (_d = api.setAnalyticsData) === null || _d === void 0 ? void 0 : _d.call(api, "understood", "true");
720
+ (_c = api.setAnalyticsData) === null || _c === void 0 ? void 0 : _c.call(api, "understood", "true");
749
721
  input.understood = true;
750
722
  const knowledgeSearchResponse = await api.knowledgeSearch(data);
751
723
  (0, prompt_1.writeLLMDebugLogs)("Search Extract Output Embeddings Call", data.query, undefined, debugLogTokenCount, false, cognigy);
@@ -778,7 +750,7 @@ New: `;
778
750
  }
779
751
  // #endregion 1 Perform Search
780
752
  // #region 2 Perform Answer Extraction
781
- let documents = (_e = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _e === void 0 ? void 0 : _e.map(result => result === null || result === void 0 ? void 0 : result.text).join(' ');
753
+ let documents = (_d = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _d === void 0 ? void 0 : _d.map(result => result === null || result === void 0 ? void 0 : result.text).join(' ');
782
754
  const replacedUserInput = input.text + (actualSearchString !== input.text ? ` possibly meaning "${actualSearchString}"` : "");
783
755
  prompt = prompt.replace(/@userInput/g, replacedUserInput);
784
756
  prompt = prompt.replace(/@foundDocuments/g, documents);
@@ -983,7 +955,7 @@ New: `;
983
955
  {
984
956
  "separator": true,
985
957
  "type": "TextBlock",
986
- "text": (_f = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _f === void 0 ? void 0 : _f[0].text,
958
+ "text": (_e = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _e === void 0 ? void 0 : _e[0].text,
987
959
  "wrap": true,
988
960
  "spacing": "Padding"
989
961
  }
@@ -1004,7 +976,7 @@ New: `;
1004
976
  "version": "1.6"
1005
977
  };
1006
978
  // @ts-ignore
1007
- if ((_j = (_h = (_g = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _g === void 0 ? void 0 : _g[0]) === null || _h === void 0 ? void 0 : _h.chunkMetaData) === null || _j === void 0 ? void 0 : _j.url) {
979
+ if ((_h = (_g = (_f = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _f === void 0 ? void 0 : _f[0]) === null || _g === void 0 ? void 0 : _g.chunkMetaData) === null || _h === void 0 ? void 0 : _h.url) {
1008
980
  ADAPTIVE_CARD_RESULT.body[2].items[0].columns[1].items.push({
1009
981
  "type": "ActionSet",
1010
982
  "actions": [
@@ -1012,7 +984,7 @@ New: `;
1012
984
  "type": "Action.OpenUrl",
1013
985
  "title": "Open Source",
1014
986
  // @ts-ignore
1015
- "url": (_k = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _k === void 0 ? void 0 : _k[0].chunkMetaData.url
987
+ "url": (_j = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _j === void 0 ? void 0 : _j[0].chunkMetaData.url
1016
988
  }
1017
989
  ],
1018
990
  "separator": true
@@ -1042,7 +1014,7 @@ New: `;
1042
1014
  await api.output(promptResponse, null);
1043
1015
  }
1044
1016
  else if (mainPromptResponse.finishReason) {
1045
- (_l = api.output) === null || _l === void 0 ? void 0 : _l.call(api, "", {
1017
+ (_k = api.output) === null || _k === void 0 ? void 0 : _k.call(api, "", {
1046
1018
  _cognigy: {
1047
1019
  _preventTranscript: true,
1048
1020
  _messageId,
@@ -1067,4 +1039,31 @@ New: `;
1067
1039
  }
1068
1040
  }
1069
1041
  });
1042
+ /**
1043
+ * Parses the prompt for the context-aware user query rephrasing.
1044
+ * It replaces the "@@chatHistory" variable with the chat history messages.
1045
+ * It replaces the "@@userQuery" variable with the last user message.
1046
+ *
1047
+ * @param chatHistory - The chat history to be used for context.
1048
+ * @return A function that takes a raw prompt and returns the modified prompt.
1049
+ */
1050
+ function getContextAwareUserQueryRephrasingPromptParser(chatHistory) {
1051
+ return (rawPrompt) => {
1052
+ if ((0, getPrompt_1.isOpenAIChatPrompt)(rawPrompt)) {
1053
+ const modifiedPrompt = [...rawPrompt];
1054
+ for (const message of chatHistory) {
1055
+ const role = message.source === "user" ? "user" : "assistant";
1056
+ modifiedPrompt.push({
1057
+ role,
1058
+ content: message.text
1059
+ });
1060
+ }
1061
+ return modifiedPrompt;
1062
+ }
1063
+ else {
1064
+ throw new errors_1.InternalServerError(`Invalid prompt type for context-aware user query rephrasing. Expected a chat prompt.`);
1065
+ }
1066
+ };
1067
+ }
1068
+ exports.getContextAwareUserQueryRephrasingPromptParser = getContextAwareUserQueryRephrasingPromptParser;
1070
1069
  //# sourceMappingURL=searchExtractOutput.js.map
@@ -55,13 +55,13 @@ exports.IF = (0, createNodeDescriptor_1.createNodeDescriptor)({
55
55
  switch (type) {
56
56
  case "rule":
57
57
  {
58
- isConditionTrue = api.evaluateRule(rule);
58
+ isConditionTrue = await api.evaluateRule(rule);
59
59
  }
60
60
  break;
61
61
  case "condition":
62
62
  default:
63
63
  {
64
- isConditionTrue = api.parseCognigyScriptCondition(condition);
64
+ isConditionTrue = await api.parseCognigyScriptCondition(condition);
65
65
  }
66
66
  break;
67
67
  }
@@ -190,32 +190,41 @@ exports.SWITCH = (0, createNodeDescriptor_1.createNodeDescriptor)({
190
190
  * by mistake, then CS has already been parsed,
191
191
  * causing the parser return empty string.
192
192
  */
193
- parsedOperator = (_a = api.parseCognigyScriptText(`{{${operator}}}`)) !== null && _a !== void 0 ? _a : operator;
193
+ parsedOperator = (_a = (await api.parseCognigyScriptText(`{{${operator}}}`))) !== null && _a !== void 0 ? _a : operator;
194
194
  if (parsedOperator === "") {
195
195
  parsedOperator = operator;
196
196
  }
197
197
  }
198
- const matchedCase = children === null || children === void 0 ? void 0 : children.find((child) => {
199
- var _a, _b;
200
- if (child.type !== "case") {
201
- return;
198
+ let matchedCase = undefined;
199
+ if (children) {
200
+ for (const child of children) {
201
+ if (child.type !== "case") {
202
+ continue;
203
+ }
204
+ const check = async () => {
205
+ var _a, _b;
206
+ if (useStrict) {
207
+ return (await api.parseCognigyScriptText(`${(_a = child.config.case.value) === null || _a === void 0 ? void 0 : _a.trim()}`)) === parsedOperator;
208
+ }
209
+ else {
210
+ /**
211
+ * We cast the case
212
+ * and the operator to strings to avoid issues
213
+ * where e.g. the case contains a string but the
214
+ * operator is a number. We do not support switching on
215
+ * objects
216
+ */
217
+ const parsedCognigyScriptText = `${await api.parseCognigyScriptText(`${(_b = child.config.case.value) === null || _b === void 0 ? void 0 : _b.trim()}`)}`;
218
+ const parsedOperatorString = `${parsedOperator}`;
219
+ return parsedCognigyScriptText === parsedOperatorString;
220
+ }
221
+ };
222
+ if (await check()) {
223
+ matchedCase = child;
224
+ break;
225
+ }
202
226
  }
203
- if (useStrict) {
204
- return api.parseCognigyScriptText(`${(_a = child.config.case.value) === null || _a === void 0 ? void 0 : _a.trim()}`) === parsedOperator;
205
- }
206
- else {
207
- /**
208
- * We cast the case
209
- * and the operator to strings to avoid issues
210
- * where e.g. the case contains a string but the
211
- * operator is a number. We do not support switching on
212
- * objects
213
- */
214
- const parsedCognigyScriptText = `${api.parseCognigyScriptText(`${(_b = child.config.case.value) === null || _b === void 0 ? void 0 : _b.trim()}`)}`;
215
- const parsedOperatorString = `${parsedOperator}`;
216
- return parsedCognigyScriptText === parsedOperatorString;
217
- }
218
- });
227
+ }
219
228
  if (matchedCase) {
220
229
  api.setNextNode(matchedCase.id);
221
230
  return;
@@ -1899,7 +1899,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
1899
1899
  // set input.result, so we can use it for validation
1900
1900
  input.result = result;
1901
1901
  // Verify that answer is valid based on some other conditions defined in the function
1902
- const isValid = (0, validateQuestionAnswer_1.validateQuestionAnswer)(cognigy, config);
1902
+ const isValid = await (0, validateQuestionAnswer_1.validateQuestionAnswer)(cognigy, config);
1903
1903
  if (!isValid) {
1904
1904
  input.result = null;
1905
1905
  }
@@ -2057,7 +2057,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
2057
2057
  // if a result location was specified, try to get the result from that location
2058
2058
  // if the location returns a falsey value, the answer is invalid
2059
2059
  if (resultLocation && result) {
2060
- result = api.parseCognigyScriptResultLocation(resultLocation);
2060
+ result = await api.parseCognigyScriptResultLocation(resultLocation);
2061
2061
  // If we want detailed results, augment the result object accordingly
2062
2062
  if (storeDetailedResults && result !== null && result !== undefined) {
2063
2063
  result = {
@@ -2410,7 +2410,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
2410
2410
  // #region 5.2.3 Reprompt
2411
2411
  // check if there is an extra condition defined for reprompts and check whether it was truthy
2412
2412
  if (sayReprompt && repromptCondition) {
2413
- const repromptConditionResult = !!api.parseCognigyScriptCondition(repromptCondition);
2413
+ const repromptConditionResult = !!await api.parseCognigyScriptCondition(repromptCondition);
2414
2414
  !repromptConditionResult && api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_6`, "Skipping Reprompt Message");
2415
2415
  sayReprompt = repromptConditionResult;
2416
2416
  }
@@ -1,13 +1,13 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.validateQuestionAnswer = void 0;
4
- const validateQuestionAnswer = (cognigy, config) => {
4
+ const validateQuestionAnswer = async (cognigy, config) => {
5
5
  const { additionalValidation, escalateIntentsAction, escalateIntentsThreshold, escalateIntentsValidIntents } = config;
6
6
  const { input, api } = cognigy;
7
7
  let isValid = true;
8
8
  // check if there is an extra condition defined and check whether it was truthy
9
9
  if (additionalValidation) {
10
- const additionalValidationResult = !!api.parseCognigyScriptCondition(additionalValidation);
10
+ const additionalValidationResult = !!await api.parseCognigyScriptCondition(additionalValidation);
11
11
  !additionalValidationResult && api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_7`, "Invalid Answer");
12
12
  isValid = additionalValidationResult;
13
13
  }
@@ -1,6 +1,6 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.convertChatToPrompt = exports.writeLLMDebugLogs = exports.createLastUserInputString = exports.createLastConversationChatObject = exports.createLastConverationString = exports.createInvalidAnswerPrompt = exports.createQuestionPrompt = exports.createExtractionPrompt = void 0;
3
+ exports.promptToString = exports.convertChatToPrompt = exports.writeLLMDebugLogs = exports.createLastUserInputString = exports.createLastConversationChatObject = exports.createLastConverationString = exports.createInvalidAnswerPrompt = exports.createQuestionPrompt = exports.createExtractionPrompt = void 0;
4
4
  const createExtractionPrompt = (slots, lastConversationEntries) => {
5
5
  const userInput = lastConversationEntries.filter(entry => entry.source === "user").map(entry => "- " + entry.text).join("\n");
6
6
  const conversation = (0, exports.createLastConverationString)(lastConversationEntries);
@@ -159,6 +159,10 @@ const writeLLMDebugLogs = async (label, prompt, response, debugLogTokenCount, de
159
159
  completionTokenMessage = ` (${completionTokens} Tokens)`;
160
160
  }
161
161
  }
162
+ let promptString = prompt;
163
+ if (typeof prompt != "string") {
164
+ promptString = promptToString(prompt);
165
+ }
162
166
  let inputLabelKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__REQUEST";
163
167
  let headerKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER";
164
168
  if (nodeType === "llmPromptV2") {
@@ -166,7 +170,7 @@ const writeLLMDebugLogs = async (label, prompt, response, debugLogTokenCount, de
166
170
  headerKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER_WITH_SYSTEM_PROMPT";
167
171
  }
168
172
  ;
169
- api.logDebugMessage(`${inputLabelKey}${requestTokenMessage}:<br>${prompt}<br><br>UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__COMPLETION${completionTokenMessage}:<br>${responseOutputFormatted}`, headerKey);
173
+ api.logDebugMessage(`${inputLabelKey}${requestTokenMessage}:<br>${promptString}<br><br>UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__COMPLETION${completionTokenMessage}:<br>${responseOutputFormatted}`, headerKey);
170
174
  }
171
175
  catch (err) { }
172
176
  }
@@ -208,4 +212,29 @@ const convertChatToPrompt = (chat) => {
208
212
  return prompt;
209
213
  };
210
214
  exports.convertChatToPrompt = convertChatToPrompt;
215
+ /**
216
+ * Converts a TALLPrompts object into a string representation.
217
+ * @param prompt The prompt to convert to a string
218
+ * @returns The string representation of the prompt
219
+ */
220
+ function promptToString(prompt) {
221
+ if ("prompt" in prompt) {
222
+ // TCompletionPrompt
223
+ return prompt.prompt;
224
+ }
225
+ else if ("messages" in prompt) {
226
+ // TChatPrompt
227
+ return prompt.messages
228
+ .map((msg) => `[${msg.role}] ${msg.content}`)
229
+ .join("\n");
230
+ }
231
+ else if (Array.isArray(prompt)) {
232
+ // OpenAIChatMessage[]
233
+ return prompt
234
+ .map((msg) => `[${msg.role}] ${msg.content}`)
235
+ .join("\n");
236
+ }
237
+ return "";
238
+ }
239
+ exports.promptToString = promptToString;
211
240
  //# sourceMappingURL=prompt.js.map