@cognigy/rest-api-client 2025.15.1 → 2025.17.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. package/CHANGELOG.md +15 -0
  2. package/build/apigroups/MetricsAPIGroup_2_0.js +10 -0
  3. package/build/apigroups/ResourcesAPIGroup_2_0.js +6 -0
  4. package/build/apigroups/SimulationAPIGroup_2_0.js +4 -0
  5. package/build/shared/charts/descriptors/data/debugMessage.js +13 -3
  6. package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +48 -49
  7. package/build/shared/charts/descriptors/logic/if/if.js +2 -2
  8. package/build/shared/charts/descriptors/logic/switch/switch.js +30 -21
  9. package/build/shared/charts/descriptors/message/question/question.js +3 -3
  10. package/build/shared/charts/descriptors/message/question/utils/validateQuestionAnswer.js +2 -2
  11. package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +31 -2
  12. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +40 -24
  13. package/build/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +4 -4
  14. package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +36 -21
  15. package/build/shared/charts/descriptors/transcripts/addTranscriptStep.js +3 -3
  16. package/build/shared/charts/descriptors/transcripts/getTranscript.js +23 -3
  17. package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +3 -0
  18. package/build/shared/generativeAI/getPrompt.js +75 -0
  19. package/build/shared/generativeAI/utils/generativeAIPrompts.js +613 -0
  20. package/build/shared/generativeAI/utils/prompts/contextAwareUserQueryRephrasing.js +84 -0
  21. package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +2 -0
  22. package/build/shared/interfaces/messageAPI/handover.js +6 -0
  23. package/build/shared/interfaces/resources/IGetAiAgentJobsTools.js +3 -0
  24. package/build/shared/interfaces/resources/IKnowledgeDescriptor.js +38 -5
  25. package/build/shared/interfaces/resources/ILargeLanguageModel.js +16 -1
  26. package/build/shared/interfaces/restAPI/metrics/callCounter/v3.0/ICallCounterPreAggregatedValue_3_0.js +3 -0
  27. package/build/shared/interfaces/restAPI/metrics/callCounter/v3.0/IGetCallCounterOrganisationRest_3_0.js +3 -0
  28. package/build/shared/interfaces/restAPI/metrics/callCounter/v3.0/IGetCallCounterRest_3_0.js +3 -0
  29. package/build/shared/interfaces/restAPI/metrics/callCounter/v3.0/index.js +3 -0
  30. package/build/shared/interfaces/restAPI/resources/aiAgent/v2.0/IAiAgentJobNodeWithTools_2_0.js +65 -0
  31. package/build/shared/interfaces/restAPI/resources/aiAgent/v2.0/IGetAiAgentJobAndToolsRest_2_0 .js +4 -0
  32. package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/IIndexKnowledgeDescriptorsRest_2_0.js +3 -0
  33. package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/extension/IRunKnowledgeExtensionRest_2_0.js +3 -0
  34. package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/index.js +16 -0
  35. package/build/shared/interfaces/restAPI/simulation/simulationRunBatch/IStopSimulationRunBatchRest_2_0.js +3 -0
  36. package/build/shared/interfaces/security/ICallCounterPreAggregatedValue.js +3 -0
  37. package/build/test.js +39 -0
  38. package/dist/esm/apigroups/MetricsAPIGroup_2_0.js +10 -0
  39. package/dist/esm/apigroups/ResourcesAPIGroup_2_0.js +6 -0
  40. package/dist/esm/apigroups/SimulationAPIGroup_2_0.js +4 -0
  41. package/dist/esm/shared/charts/descriptors/data/debugMessage.js +13 -3
  42. package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +48 -50
  43. package/dist/esm/shared/charts/descriptors/logic/if/if.js +2 -2
  44. package/dist/esm/shared/charts/descriptors/logic/switch/switch.js +30 -21
  45. package/dist/esm/shared/charts/descriptors/message/question/question.js +3 -3
  46. package/dist/esm/shared/charts/descriptors/message/question/utils/validateQuestionAnswer.js +4 -3
  47. package/dist/esm/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +29 -1
  48. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +39 -23
  49. package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +4 -4
  50. package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +43 -28
  51. package/dist/esm/shared/charts/descriptors/transcripts/addTranscriptStep.js +3 -3
  52. package/dist/esm/shared/charts/descriptors/transcripts/getTranscript.js +23 -3
  53. package/dist/esm/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +3 -0
  54. package/dist/esm/shared/generativeAI/getPrompt.js +68 -0
  55. package/dist/esm/shared/generativeAI/utils/generativeAIPrompts.js +610 -0
  56. package/dist/esm/shared/generativeAI/utils/prompts/contextAwareUserQueryRephrasing.js +81 -0
  57. package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +2 -0
  58. package/dist/esm/shared/interfaces/messageAPI/handover.js +6 -0
  59. package/dist/esm/shared/interfaces/resources/IGetAiAgentJobsTools.js +2 -0
  60. package/dist/esm/shared/interfaces/resources/IKnowledgeDescriptor.js +37 -5
  61. package/dist/esm/shared/interfaces/resources/ILargeLanguageModel.js +14 -0
  62. package/dist/esm/shared/interfaces/restAPI/management/authentication/ICreateJWTToken.js +1 -0
  63. package/dist/esm/shared/interfaces/restAPI/metrics/callCounter/v3.0/ICallCounterPreAggregatedValue_3_0.js +2 -0
  64. package/dist/esm/shared/interfaces/restAPI/metrics/callCounter/v3.0/IGetCallCounterOrganisationRest_3_0.js +2 -0
  65. package/dist/esm/shared/interfaces/restAPI/metrics/callCounter/v3.0/IGetCallCounterRest_3_0.js +2 -0
  66. package/dist/esm/shared/interfaces/restAPI/metrics/callCounter/v3.0/index.js +2 -0
  67. package/dist/esm/shared/interfaces/restAPI/resources/aiAgent/v2.0/IAiAgentJobNodeWithTools_2_0.js +65 -0
  68. package/dist/esm/shared/interfaces/restAPI/resources/aiAgent/v2.0/IGetAiAgentJobAndToolsRest_2_0 .js +3 -0
  69. package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/IIndexKnowledgeDescriptorsRest_2_0.js +2 -0
  70. package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/extension/IRunKnowledgeExtensionRest_2_0.js +2 -0
  71. package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/index.js +2 -1
  72. package/dist/esm/shared/interfaces/restAPI/simulation/simulationRunBatch/IStopSimulationRunBatchRest_2_0.js +2 -0
  73. package/dist/esm/shared/interfaces/security/ICallCounterPreAggregatedValue.js +2 -0
  74. package/dist/esm/test.js +39 -0
  75. package/package.json +1 -1
  76. package/types/index.d.ts +299 -42
@@ -4,6 +4,7 @@ import { createNodeDescriptor } from "../../createNodeDescriptor";
4
4
  import { GO_TO } from "../logic";
5
5
  import { writeLLMDebugLogs } from "../nlu/generativeSlotFiller/prompt";
6
6
  import { randomUUID } from 'crypto';
7
+ import { isOpenAIChatPrompt } from "../../../generativeAI/getPrompt";
7
8
  import { InternalServerError } from "../../../errors";
8
9
  /**
9
10
  * Returns the simplified english name for a language given a language code
@@ -547,7 +548,7 @@ export const SEARCH_EXTRACT_OUTPUT = createNodeDescriptor({
547
548
  },
548
549
  tags: ["ai", "knowledgeSearch", "knowledge", "search"],
549
550
  function: (knowledgeSearchParams) => __awaiter(void 0, void 0, void 0, function* () {
550
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l;
551
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k;
551
552
  const { cognigy, config, nodeId } = knowledgeSearchParams;
552
553
  const { input, api } = cognigy;
553
554
  const { topK, searchString, searchStoreLocation, searchStoreLocationContextKey, searchStoreLocationInputKey, searchSourceTags, searchSourceTagsFilterOp, temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, timeoutMessage, outputFallback, outputMode, mode, errorHandling, errorHandlingGotoTarget, streamStopTokens, followUpDetection, debugLogTokenCount, debugLogRequestAndCompletion } = config;
@@ -574,53 +575,24 @@ export const SEARCH_EXTRACT_OUTPUT = createNodeDescriptor({
574
575
  // check if follow up detection is active and if yes, handle accordingly
575
576
  // this is "context aware search"
576
577
  if (followUpDetection === "transcript") {
577
- let prompt;
578
- let lastRoundTrip;
579
- // this is a fallback in case the node was created before this function was added and followUpDetectionSteps is undefined
580
- followUpDetectionSteps = followUpDetectionSteps || 2;
581
578
  // check whether we're in an flow execution that's not the first
582
579
  // as it doesn't make sense to check for follow ups in the first execution
583
580
  if (input.execution > 1) {
581
+ // this is a fallback in case the node was created before this function was added and followUpDetectionSteps is undefined
582
+ followUpDetectionSteps = followUpDetectionSteps || 2;
584
583
  // always remember the last thing the user said (needed later)
585
- lastRoundTrip = (_b = cognigy
586
- .lastConversationEntries) === null || _b === void 0 ? void 0 : _b.slice(1, followUpDetectionSteps + 1).reverse().map(entry => "- " + (entry.source === "user" ? "USER: " : "BOT: ") + entry.text).join("\n");
587
- // if follow up detection is set to 2 or more, we use the conversation transcript
588
- // as reference. Start at the second entry, because the first one is the current
589
- const recentConversation = (_c = cognigy
590
- .lastConversationEntries) === null || _c === void 0 ? void 0 : _c.slice(1, followUpDetectionSteps + 1).reverse().map(entry => "- " + (entry.source === "user" ? "USER: " : "BOT: ") + entry.text).join("\n");
591
- prompt = `Below is the transcript of a conversation:
592
- ${recentConversation}
593
- USER: ${searchString}
594
- Does the last USER input refer to the conversation before?
595
- Answer with "true" or "false". Answer:`;
596
- let promptResponse;
597
- // set the detailed results to true to get the token usage
598
- const returnDetailedResults = true;
584
+ const chatHistory = (_b = cognigy
585
+ .lastConversationEntries) === null || _b === void 0 ? void 0 : _b.slice(0, followUpDetectionSteps + 1).reverse();
599
586
  try {
600
- const firstFollowUpResponse = yield api.runGenerativeAIPrompt({ prompt, detailedResults: returnDetailedResults }, "answerExtraction");
601
- promptResponse = firstFollowUpResponse.result;
587
+ const promptData = {
588
+ // set the detailed results to true to get the token usage
589
+ detailedResults: true
590
+ };
591
+ const rephrasedUserQueryResponse = yield api.runGenerativeAIPromptForUseCase(promptData, "answerExtraction", "contextAwareUserQueryRephrasing", getContextAwareUserQueryRephrasingPromptParser(chatHistory));
592
+ const promptResponse = rephrasedUserQueryResponse.result;
602
593
  // if we're in adminconsole, process debugging options
603
- writeLLMDebugLogs("Search Extract Output Follow Up Detection", prompt, firstFollowUpResponse, debugLogTokenCount, false, cognigy);
604
- // check if LLM thinks the input was a follow up
605
- if (promptResponse === null || promptResponse === void 0 ? void 0 : promptResponse.toLowerCase().includes("true")) {
606
- prompt = `You are tasked to rewrite a question based on a context, so that the question is clearer.
607
-
608
- Example:
609
- Context:
610
- USER: Where is Germany?
611
- BOT: Germany is in Europe.
612
- Question: Is that a continent?
613
- New: Is Europe a continent?
614
-
615
- Task:
616
- Context:
617
- ${lastRoundTrip}
618
- Question: ${searchString}
619
- New: `;
620
- const secondFollowUpResponse = yield api.runGenerativeAIPrompt({ prompt, detailedResults: returnDetailedResults }, "answerExtraction");
621
- promptResponse = secondFollowUpResponse.result;
622
- // if we're in adminconsole, process debugging options
623
- writeLLMDebugLogs("Search Extract Output Follow Up Detection 2", prompt, secondFollowUpResponse, debugLogTokenCount, false, cognigy);
594
+ writeLLMDebugLogs("Search Extract Output Follow Up Detection", prompt, rephrasedUserQueryResponse, debugLogTokenCount, false, cognigy);
595
+ if ((promptResponse === null || promptResponse === void 0 ? void 0 : promptResponse.toLowerCase()) !== "false") {
624
596
  // the actual search string to now use is the rewritten question
625
597
  actualSearchString = promptResponse;
626
598
  api.logDebugMessage(`UI__DEBUG_MODE__SEO__MESSAGE '${actualSearchString}'`);
@@ -634,7 +606,7 @@ New: `;
634
606
  let knowledgeSearchResponseData;
635
607
  // handle errors from external services, depending on the settings
636
608
  const handleServiceError = (error) => __awaiter(void 0, void 0, void 0, function* () {
637
- var _m;
609
+ var _l;
638
610
  const compactError = {
639
611
  message: (error === null || error === void 0 ? void 0 : error.message) || error,
640
612
  };
@@ -670,7 +642,7 @@ New: `;
670
642
  isSnapshotError: !!(metadata === null || metadata === void 0 ? void 0 : metadata.snapshotId),
671
643
  });
672
644
  }
673
- if ((_m = error === null || error === void 0 ? void 0 : error.originalErrorDetails) === null || _m === void 0 ? void 0 : _m.code) {
645
+ if ((_l = error === null || error === void 0 ? void 0 : error.originalErrorDetails) === null || _l === void 0 ? void 0 : _l.code) {
674
646
  compactError["code"] = error.originalErrorDetails.code;
675
647
  }
676
648
  let searchStoreDataWithError = {
@@ -743,7 +715,7 @@ New: `;
743
715
  // Perform knowledge search
744
716
  try {
745
717
  // Set understood to true so that the interaction doesn't look false in our analytics
746
- (_d = api.setAnalyticsData) === null || _d === void 0 ? void 0 : _d.call(api, "understood", "true");
718
+ (_c = api.setAnalyticsData) === null || _c === void 0 ? void 0 : _c.call(api, "understood", "true");
747
719
  input.understood = true;
748
720
  const knowledgeSearchResponse = yield api.knowledgeSearch(data);
749
721
  writeLLMDebugLogs("Search Extract Output Embeddings Call", data.query, undefined, debugLogTokenCount, false, cognigy);
@@ -776,7 +748,7 @@ New: `;
776
748
  }
777
749
  // #endregion 1 Perform Search
778
750
  // #region 2 Perform Answer Extraction
779
- let documents = (_e = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _e === void 0 ? void 0 : _e.map(result => result === null || result === void 0 ? void 0 : result.text).join(' ');
751
+ let documents = (_d = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _d === void 0 ? void 0 : _d.map(result => result === null || result === void 0 ? void 0 : result.text).join(' ');
780
752
  const replacedUserInput = input.text + (actualSearchString !== input.text ? ` possibly meaning "${actualSearchString}"` : "");
781
753
  prompt = prompt.replace(/@userInput/g, replacedUserInput);
782
754
  prompt = prompt.replace(/@foundDocuments/g, documents);
@@ -981,7 +953,7 @@ New: `;
981
953
  {
982
954
  "separator": true,
983
955
  "type": "TextBlock",
984
- "text": (_f = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _f === void 0 ? void 0 : _f[0].text,
956
+ "text": (_e = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _e === void 0 ? void 0 : _e[0].text,
985
957
  "wrap": true,
986
958
  "spacing": "Padding"
987
959
  }
@@ -1002,7 +974,7 @@ New: `;
1002
974
  "version": "1.6"
1003
975
  };
1004
976
  // @ts-ignore
1005
- if ((_j = (_h = (_g = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _g === void 0 ? void 0 : _g[0]) === null || _h === void 0 ? void 0 : _h.chunkMetaData) === null || _j === void 0 ? void 0 : _j.url) {
977
+ if ((_h = (_g = (_f = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _f === void 0 ? void 0 : _f[0]) === null || _g === void 0 ? void 0 : _g.chunkMetaData) === null || _h === void 0 ? void 0 : _h.url) {
1006
978
  ADAPTIVE_CARD_RESULT.body[2].items[0].columns[1].items.push({
1007
979
  "type": "ActionSet",
1008
980
  "actions": [
@@ -1010,7 +982,7 @@ New: `;
1010
982
  "type": "Action.OpenUrl",
1011
983
  "title": "Open Source",
1012
984
  // @ts-ignore
1013
- "url": (_k = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _k === void 0 ? void 0 : _k[0].chunkMetaData.url
985
+ "url": (_j = knowledgeSearchResponseData === null || knowledgeSearchResponseData === void 0 ? void 0 : knowledgeSearchResponseData.topK) === null || _j === void 0 ? void 0 : _j[0].chunkMetaData.url
1014
986
  }
1015
987
  ],
1016
988
  "separator": true
@@ -1040,7 +1012,7 @@ New: `;
1040
1012
  yield api.output(promptResponse, null);
1041
1013
  }
1042
1014
  else if (mainPromptResponse.finishReason) {
1043
- (_l = api.output) === null || _l === void 0 ? void 0 : _l.call(api, "", {
1015
+ (_k = api.output) === null || _k === void 0 ? void 0 : _k.call(api, "", {
1044
1016
  _cognigy: {
1045
1017
  _preventTranscript: true,
1046
1018
  _messageId,
@@ -1065,4 +1037,30 @@ New: `;
1065
1037
  }
1066
1038
  })
1067
1039
  });
1040
+ /**
1041
+ * Parses the prompt for the context-aware user query rephrasing.
1042
+ * It replaces the "@@chatHistory" variable with the chat history messages.
1043
+ * It replaces the "@@userQuery" variable with the last user message.
1044
+ *
1045
+ * @param chatHistory - The chat history to be used for context.
1046
+ * @return A function that takes a raw prompt and returns the modified prompt.
1047
+ */
1048
+ export function getContextAwareUserQueryRephrasingPromptParser(chatHistory) {
1049
+ return (rawPrompt) => {
1050
+ if (isOpenAIChatPrompt(rawPrompt)) {
1051
+ const modifiedPrompt = [...rawPrompt];
1052
+ for (const message of chatHistory) {
1053
+ const role = message.source === "user" ? "user" : "assistant";
1054
+ modifiedPrompt.push({
1055
+ role,
1056
+ content: message.text
1057
+ });
1058
+ }
1059
+ return modifiedPrompt;
1060
+ }
1061
+ else {
1062
+ throw new InternalServerError(`Invalid prompt type for context-aware user query rephrasing. Expected a chat prompt.`);
1063
+ }
1064
+ };
1065
+ }
1068
1066
  //# sourceMappingURL=searchExtractOutput.js.map
@@ -53,13 +53,13 @@ export const IF = createNodeDescriptor({
53
53
  switch (type) {
54
54
  case "rule":
55
55
  {
56
- isConditionTrue = api.evaluateRule(rule);
56
+ isConditionTrue = yield api.evaluateRule(rule);
57
57
  }
58
58
  break;
59
59
  case "condition":
60
60
  default:
61
61
  {
62
- isConditionTrue = api.parseCognigyScriptCondition(condition);
62
+ isConditionTrue = yield api.parseCognigyScriptCondition(condition);
63
63
  }
64
64
  break;
65
65
  }
@@ -188,32 +188,41 @@ export const SWITCH = createNodeDescriptor({
188
188
  * by mistake, then CS has already been parsed,
189
189
  * causing the parser return empty string.
190
190
  */
191
- parsedOperator = (_a = api.parseCognigyScriptText(`{{${operator}}}`)) !== null && _a !== void 0 ? _a : operator;
191
+ parsedOperator = (_a = (yield api.parseCognigyScriptText(`{{${operator}}}`))) !== null && _a !== void 0 ? _a : operator;
192
192
  if (parsedOperator === "") {
193
193
  parsedOperator = operator;
194
194
  }
195
195
  }
196
- const matchedCase = children === null || children === void 0 ? void 0 : children.find((child) => {
197
- var _a, _b;
198
- if (child.type !== "case") {
199
- return;
196
+ let matchedCase = undefined;
197
+ if (children) {
198
+ for (const child of children) {
199
+ if (child.type !== "case") {
200
+ continue;
201
+ }
202
+ const check = () => __awaiter(void 0, void 0, void 0, function* () {
203
+ var _b, _c;
204
+ if (useStrict) {
205
+ return (yield api.parseCognigyScriptText(`${(_b = child.config.case.value) === null || _b === void 0 ? void 0 : _b.trim()}`)) === parsedOperator;
206
+ }
207
+ else {
208
+ /**
209
+ * We cast the case
210
+ * and the operator to strings to avoid issues
211
+ * where e.g. the case contains a string but the
212
+ * operator is a number. We do not support switching on
213
+ * objects
214
+ */
215
+ const parsedCognigyScriptText = `${yield api.parseCognigyScriptText(`${(_c = child.config.case.value) === null || _c === void 0 ? void 0 : _c.trim()}`)}`;
216
+ const parsedOperatorString = `${parsedOperator}`;
217
+ return parsedCognigyScriptText === parsedOperatorString;
218
+ }
219
+ });
220
+ if (yield check()) {
221
+ matchedCase = child;
222
+ break;
223
+ }
200
224
  }
201
- if (useStrict) {
202
- return api.parseCognigyScriptText(`${(_a = child.config.case.value) === null || _a === void 0 ? void 0 : _a.trim()}`) === parsedOperator;
203
- }
204
- else {
205
- /**
206
- * We cast the case
207
- * and the operator to strings to avoid issues
208
- * where e.g. the case contains a string but the
209
- * operator is a number. We do not support switching on
210
- * objects
211
- */
212
- const parsedCognigyScriptText = `${api.parseCognigyScriptText(`${(_b = child.config.case.value) === null || _b === void 0 ? void 0 : _b.trim()}`)}`;
213
- const parsedOperatorString = `${parsedOperator}`;
214
- return parsedCognigyScriptText === parsedOperatorString;
215
- }
216
- });
225
+ }
217
226
  if (matchedCase) {
218
227
  api.setNextNode(matchedCase.id);
219
228
  return;
@@ -1897,7 +1897,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
1897
1897
  // set input.result, so we can use it for validation
1898
1898
  input.result = result;
1899
1899
  // Verify that answer is valid based on some other conditions defined in the function
1900
- const isValid = validateQuestionAnswer(cognigy, config);
1900
+ const isValid = yield validateQuestionAnswer(cognigy, config);
1901
1901
  if (!isValid) {
1902
1902
  input.result = null;
1903
1903
  }
@@ -2055,7 +2055,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
2055
2055
  // if a result location was specified, try to get the result from that location
2056
2056
  // if the location returns a falsey value, the answer is invalid
2057
2057
  if (resultLocation && result) {
2058
- result = api.parseCognigyScriptResultLocation(resultLocation);
2058
+ result = yield api.parseCognigyScriptResultLocation(resultLocation);
2059
2059
  // If we want detailed results, augment the result object accordingly
2060
2060
  if (storeDetailedResults && result !== null && result !== undefined) {
2061
2061
  result = {
@@ -2408,7 +2408,7 @@ DO NOT talk about other topics. Do not offer general assistance.`,
2408
2408
  // #region 5.2.3 Reprompt
2409
2409
  // check if there is an extra condition defined for reprompts and check whether it was truthy
2410
2410
  if (sayReprompt && repromptCondition) {
2411
- const repromptConditionResult = !!api.parseCognigyScriptCondition(repromptCondition);
2411
+ const repromptConditionResult = !!(yield api.parseCognigyScriptCondition(repromptCondition));
2412
2412
  !repromptConditionResult && api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_6`, "Skipping Reprompt Message");
2413
2413
  sayReprompt = repromptConditionResult;
2414
2414
  }
@@ -1,10 +1,11 @@
1
- export const validateQuestionAnswer = (cognigy, config) => {
1
+ import { __awaiter } from "tslib";
2
+ export const validateQuestionAnswer = (cognigy, config) => __awaiter(void 0, void 0, void 0, function* () {
2
3
  const { additionalValidation, escalateIntentsAction, escalateIntentsThreshold, escalateIntentsValidIntents } = config;
3
4
  const { input, api } = cognigy;
4
5
  let isValid = true;
5
6
  // check if there is an extra condition defined and check whether it was truthy
6
7
  if (additionalValidation) {
7
- const additionalValidationResult = !!api.parseCognigyScriptCondition(additionalValidation);
8
+ const additionalValidationResult = !!(yield api.parseCognigyScriptCondition(additionalValidation));
8
9
  !additionalValidationResult && api.logDebugMessage(`UI__DEBUG_MODE__QUESTION__MESSAGE_7`, "Invalid Answer");
9
10
  isValid = additionalValidationResult;
10
11
  }
@@ -16,5 +17,5 @@ export const validateQuestionAnswer = (cognigy, config) => {
16
17
  }
17
18
  }
18
19
  return isValid;
19
- };
20
+ });
20
21
  //# sourceMappingURL=validateQuestionAnswer.js.map
@@ -151,6 +151,10 @@ export const writeLLMDebugLogs = (label, prompt, response, debugLogTokenCount, d
151
151
  completionTokenMessage = ` (${completionTokens} Tokens)`;
152
152
  }
153
153
  }
154
+ let promptString = prompt;
155
+ if (typeof prompt != "string") {
156
+ promptString = promptToString(prompt);
157
+ }
154
158
  let inputLabelKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__REQUEST";
155
159
  let headerKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER";
156
160
  if (nodeType === "llmPromptV2") {
@@ -158,7 +162,7 @@ export const writeLLMDebugLogs = (label, prompt, response, debugLogTokenCount, d
158
162
  headerKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER_WITH_SYSTEM_PROMPT";
159
163
  }
160
164
  ;
161
- api.logDebugMessage(`${inputLabelKey}${requestTokenMessage}:<br>${prompt}<br><br>UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__COMPLETION${completionTokenMessage}:<br>${responseOutputFormatted}`, headerKey);
165
+ api.logDebugMessage(`${inputLabelKey}${requestTokenMessage}:<br>${promptString}<br><br>UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__COMPLETION${completionTokenMessage}:<br>${responseOutputFormatted}`, headerKey);
162
166
  }
163
167
  catch (err) { }
164
168
  }
@@ -198,4 +202,28 @@ export const convertChatToPrompt = (chat) => {
198
202
  prompt += "assistant: ";
199
203
  return prompt;
200
204
  };
205
+ /**
206
+ * Converts a TALLPrompts object into a string representation.
207
+ * @param prompt The prompt to convert to a string
208
+ * @returns The string representation of the prompt
209
+ */
210
+ export function promptToString(prompt) {
211
+ if ("prompt" in prompt) {
212
+ // TCompletionPrompt
213
+ return prompt.prompt;
214
+ }
215
+ else if ("messages" in prompt) {
216
+ // TChatPrompt
217
+ return prompt.messages
218
+ .map((msg) => `[${msg.role}] ${msg.content}`)
219
+ .join("\n");
220
+ }
221
+ else if (Array.isArray(prompt)) {
222
+ // OpenAIChatMessage[]
223
+ return prompt
224
+ .map((msg) => `[${msg.role}] ${msg.content}`)
225
+ .join("\n");
226
+ }
227
+ return "";
228
+ }
201
229
  //# sourceMappingURL=prompt.js.map
@@ -10,6 +10,7 @@ import { generateSearchPrompt } from "./helpers/generateSearchPrompt";
10
10
  import { getUserMemory } from "./helpers/getUserMemory";
11
11
  import { createToolDefinitions } from "./helpers/createToolDefinitions";
12
12
  import { TranscriptEntryType, TranscriptRole } from "../../../../interfaces/transcripts/transcripts";
13
+ export const AI_AGENT_TOOLS_WHITELIST = ["aiAgentJobDefault", "aiAgentJobTool", "aiAgentJobMCPTool"];
13
14
  export const AI_AGENT_JOB = createNodeDescriptor({
14
15
  type: "aiAgentJob",
15
16
  defaultLabel: "AI Agent",
@@ -18,7 +19,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
18
19
  collapsable: true,
19
20
  placement: {
20
21
  children: {
21
- whitelist: ["aiAgentJobDefault", "aiAgentJobTool", "aiAgentJobMCPTool"],
22
+ whitelist: AI_AGENT_TOOLS_WHITELIST,
22
23
  },
23
24
  },
24
25
  },
@@ -407,6 +408,13 @@ export const AI_AGENT_JOB = createNodeDescriptor({
407
408
  step: 0.1
408
409
  }
409
410
  },
411
+ {
412
+ key: "useTextAlternativeForLLM",
413
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__INCLUDE_ALL_OUTPUT_TYPES__LABEL",
414
+ type: "toggle",
415
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__INCLUDE_ALL_OUTPUT_TYPES__DESCRIPTION",
416
+ defaultValue: true,
417
+ },
410
418
  {
411
419
  key: "logErrorToSystem",
412
420
  label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_TO_SYSTEM__LABEL",
@@ -802,6 +810,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
802
810
  "timeoutInMs",
803
811
  "maxTokens",
804
812
  "temperature",
813
+ "useTextAlternativeForLLM",
805
814
  ],
806
815
  },
807
816
  {
@@ -847,9 +856,9 @@ export const AI_AGENT_JOB = createNodeDescriptor({
847
856
  ],
848
857
  tags: ["ai", "aiAgent"],
849
858
  function: ({ cognigy, config, childConfigs, nodeId }) => __awaiter(void 0, void 0, void 0, function* () {
850
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21;
859
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23;
851
860
  const { api, context, input, profile, flowReferenceId } = cognigy;
852
- const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, debugLogLLMLatency, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
861
+ const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, useTextAlternativeForLLM, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, debugLogLLMLatency, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
853
862
  try {
854
863
  if (!aiAgent) {
855
864
  throw new Error("Could not resolve AI Agent reference in AI Agent Node");
@@ -897,7 +906,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
897
906
  throw new Error(`[VG2] Error on AI Agent Job node. Error message: ${error.message}`);
898
907
  }
899
908
  }
900
- const _22 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _22, cleanedProfile = __rest(_22, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
909
+ const _24 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _24, cleanedProfile = __rest(_24, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
901
910
  const userMemory = getUserMemory(memoryType, selectedProfileFields, aiAgent, cleanedProfile);
902
911
  /**
903
912
  * ----- Knowledge Search Section -----
@@ -1127,7 +1136,8 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1127
1136
  const transcript = yield api.getTranscript({
1128
1137
  limit: 50,
1129
1138
  rolesWhiteList: [TranscriptRole.USER, TranscriptRole.ASSISTANT, TranscriptRole.TOOL],
1130
- excludeDataOnlyMessagesFilter: [TranscriptRole.ASSISTANT]
1139
+ excludeDataOnlyMessagesFilter: [TranscriptRole.ASSISTANT],
1140
+ useTextAlternativeForLLM,
1131
1141
  });
1132
1142
  // For knowledgeSearch "always", we enhance the user input with the knowledge search response data
1133
1143
  if (knowledgeSearchBehavior === "always" &&
@@ -1212,14 +1222,20 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1212
1222
  const mainToolCall = llmResult.toolCalls[0];
1213
1223
  let isMcpToolCall = false;
1214
1224
  // Find the child node with the toolId of the tool call
1215
- let toolChild = childConfigs.find(child => { var _a, _b; return child.type === "aiAgentJobTool" && ((_a = child.config) === null || _a === void 0 ? void 0 : _a.toolId) && api.parseCognigyScriptText((_b = child.config) === null || _b === void 0 ? void 0 : _b.toolId) === mainToolCall.function.name; });
1225
+ let toolChild = undefined;
1226
+ for (const child of childConfigs) {
1227
+ if (child.type === "aiAgentJobTool" && ((_5 = child.config) === null || _5 === void 0 ? void 0 : _5.toolId) && (yield api.parseCognigyScriptText((_6 = child.config) === null || _6 === void 0 ? void 0 : _6.toolId)) === mainToolCall.function.name) {
1228
+ toolChild = child;
1229
+ break;
1230
+ }
1231
+ }
1216
1232
  if (!toolChild && toolMap.has(mainToolCall.function.name)) {
1217
1233
  // If the tool call is from an MCP tool, set the next node to the corresponding child node
1218
1234
  toolChild = childConfigs.find(child => child.id === toolMap.get(mainToolCall.function.name));
1219
1235
  isMcpToolCall = true;
1220
1236
  }
1221
1237
  if (mainToolCall.function.name !== "retrieve_knowledge" && toolChild === undefined) {
1222
- (_5 = api.logDebugError) === null || _5 === void 0 ? void 0 : _5.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
1238
+ (_7 = api.logDebugError) === null || _7 === void 0 ? void 0 : _7.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
1223
1239
  }
1224
1240
  // Add last tool call to session state for loading it from Tool Answer Node
1225
1241
  api.updateSessionStateValues({
@@ -1227,21 +1243,21 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1227
1243
  flow: flowReferenceId,
1228
1244
  node: nodeId,
1229
1245
  } }, (isMcpToolCall && {
1230
- mcpServerUrl: (_6 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _6 === void 0 ? void 0 : _6.mcpServerUrl,
1231
- timeout: (_7 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _7 === void 0 ? void 0 : _7.timeout,
1246
+ mcpServerUrl: (_8 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _8 === void 0 ? void 0 : _8.mcpServerUrl,
1247
+ timeout: (_9 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _9 === void 0 ? void 0 : _9.timeout,
1232
1248
  mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
1233
1249
  })), { toolCall: mainToolCall }),
1234
1250
  });
1235
1251
  // if there are any parameters/arguments, add them to the input slots
1236
1252
  if (mainToolCall.function.arguments) {
1237
- input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_9 = (_8 = input.aiAgent) === null || _8 === void 0 ? void 0 : _8.toolArgs) !== null && _9 !== void 0 ? _9 : {}), mainToolCall.function.arguments) });
1253
+ input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_11 = (_10 = input.aiAgent) === null || _10 === void 0 ? void 0 : _10.toolArgs) !== null && _11 !== void 0 ? _11 : {}), mainToolCall.function.arguments) });
1238
1254
  }
1239
1255
  // Debug Message for Tool Calls, configured in the Tool Node
1240
- if ((_10 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _10 === void 0 ? void 0 : _10.debugMessage) {
1241
- const toolId = isMcpToolCall ? mainToolCall.function.name : api.parseCognigyScriptText(toolChild.config.toolId);
1256
+ if ((_12 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _12 === void 0 ? void 0 : _12.debugMessage) {
1257
+ const toolId = isMcpToolCall ? mainToolCall.function.name : yield api.parseCognigyScriptText(toolChild.config.toolId);
1242
1258
  const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${toolId}`];
1243
1259
  // Arguments / Parameters Slots
1244
- const slots = ((_11 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _11 === void 0 ? void 0 : _11.arguments) && Object.keys(mainToolCall.function.arguments);
1260
+ const slots = ((_13 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _13 === void 0 ? void 0 : _13.arguments) && Object.keys(mainToolCall.function.arguments);
1245
1261
  const hasSlots = slots && slots.length > 0;
1246
1262
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
1247
1263
  if (hasSlots) {
@@ -1256,7 +1272,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1256
1272
  messageLines.push(`- ${slot}: ${slotValueAsString}`);
1257
1273
  });
1258
1274
  }
1259
- (_12 = api.logDebugMessage) === null || _12 === void 0 ? void 0 : _12.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
1275
+ (_14 = api.logDebugMessage) === null || _14 === void 0 ? void 0 : _14.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
1260
1276
  }
1261
1277
  if (toolChild) {
1262
1278
  api.setNextNode(toolChild.id);
@@ -1281,11 +1297,11 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1281
1297
  }
1282
1298
  // Optionally output the result immediately
1283
1299
  if (llmResult.result && outputImmediately && !llmPromptOptions.stream) {
1284
- yield ((_13 = api.output) === null || _13 === void 0 ? void 0 : _13.call(api, llmResult.result, {}));
1300
+ yield ((_15 = api.output) === null || _15 === void 0 ? void 0 : _15.call(api, llmResult.result, {}));
1285
1301
  }
1286
1302
  else if (llmResult.finishReason && llmPromptOptions.stream) {
1287
1303
  // send the finishReason as last output for a stream
1288
- (_14 = api.output) === null || _14 === void 0 ? void 0 : _14.call(api, "", {
1304
+ (_16 = api.output) === null || _16 === void 0 ? void 0 : _16.call(api, "", {
1289
1305
  _cognigy: {
1290
1306
  _preventTranscript: true,
1291
1307
  _messageId,
@@ -1308,7 +1324,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1308
1324
  }
1309
1325
  // Add response to Cognigy Input/Context for further usage
1310
1326
  if (storeLocation === "context") {
1311
- (_15 = api.addToContext) === null || _15 === void 0 ? void 0 : _15.call(api, contextKey, llmResult, "simple");
1327
+ (_17 = api.addToContext) === null || _17 === void 0 ? void 0 : _17.call(api, contextKey, llmResult, "simple");
1312
1328
  }
1313
1329
  else if (storeLocation === "input") {
1314
1330
  api.addToInput(inputKey, llmResult);
@@ -1321,14 +1337,14 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1321
1337
  const errorDetails = {
1322
1338
  name: (error === null || error === void 0 ? void 0 : error.name) || "Error",
1323
1339
  code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
1324
- message: (error === null || error === void 0 ? void 0 : error.message) || ((_16 = error.originalErrorDetails) === null || _16 === void 0 ? void 0 : _16.message),
1340
+ message: (error === null || error === void 0 ? void 0 : error.message) || ((_18 = error.originalErrorDetails) === null || _18 === void 0 ? void 0 : _18.message),
1325
1341
  };
1326
- (_17 = api.emitEvent) === null || _17 === void 0 ? void 0 : _17.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
1342
+ (_19 = api.emitEvent) === null || _19 === void 0 ? void 0 : _19.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
1327
1343
  if (logErrorToSystem) {
1328
- (_18 = api.log) === null || _18 === void 0 ? void 0 : _18.call(api, "error", JSON.stringify(errorDetails));
1344
+ (_20 = api.log) === null || _20 === void 0 ? void 0 : _20.call(api, "error", JSON.stringify(errorDetails));
1329
1345
  }
1330
1346
  if (errorHandling !== "stop") {
1331
- (_19 = api.logDebugError) === null || _19 === void 0 ? void 0 : _19.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
1347
+ (_21 = api.logDebugError) === null || _21 === void 0 ? void 0 : _21.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
1332
1348
  }
1333
1349
  if (storeErrorInInput) {
1334
1350
  input.aiAgent = input.aiAgent || {};
@@ -1337,7 +1353,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1337
1353
  if (errorHandling === "continue") {
1338
1354
  // output the timeout message
1339
1355
  if (errorMessage) {
1340
- yield ((_20 = api.output) === null || _20 === void 0 ? void 0 : _20.call(api, errorMessage, null));
1356
+ yield ((_22 = api.output) === null || _22 === void 0 ? void 0 : _22.call(api, errorMessage, null));
1341
1357
  }
1342
1358
  // Set default node as next node
1343
1359
  const defaultChild = childConfigs.find(child => child.type === "aiAgentJobDefault");
@@ -1349,7 +1365,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1349
1365
  if (!errorHandlingGotoTarget) {
1350
1366
  throw new Error("GoTo Target is required");
1351
1367
  }
1352
- if (!((_21 = api.checkThink) === null || _21 === void 0 ? void 0 : _21.call(api, nodeId))) {
1368
+ if (!((_23 = api.checkThink) === null || _23 === void 0 ? void 0 : _23.call(api, nodeId))) {
1353
1369
  api.resetNextNodes();
1354
1370
  yield api.executeFlow({
1355
1371
  flowNode: {
@@ -34,11 +34,11 @@ export const createToolDefinitions = (childConfigs, api, useStrict) => __awaiter
34
34
  }
35
35
  const toolId = child.config.toolId;
36
36
  if ((child.type === "aiAgentJobTool" || child.type === "llmPromptTool") &&
37
- (!child.config.condition || !!api.parseCognigyScriptCondition(child.config.condition))) {
37
+ (!child.config.condition || !!(yield api.parseCognigyScriptCondition(child.config.condition)))) {
38
38
  if (!toolId) {
39
39
  throw new Error(`Tool ID is missing in Tool Node configuration.`);
40
40
  }
41
- const parsedToolId = api.parseCognigyScriptText(toolId);
41
+ const parsedToolId = yield api.parseCognigyScriptText(toolId);
42
42
  if (!validateToolId(parsedToolId)) {
43
43
  throw new Error(`Tool ID ${parsedToolId} is not valid. Please use only alphanumeric characters, dashes and underscores.`);
44
44
  }
@@ -51,7 +51,7 @@ export const createToolDefinitions = (childConfigs, api, useStrict) => __awaiter
51
51
  type: "function",
52
52
  function: {
53
53
  name: parsedToolId,
54
- description: api.parseCognigyScriptText(child.config.description),
54
+ description: yield api.parseCognigyScriptText(child.config.description),
55
55
  },
56
56
  };
57
57
  if (useStrict) {
@@ -63,7 +63,7 @@ export const createToolDefinitions = (childConfigs, api, useStrict) => __awaiter
63
63
  tools.push(tool);
64
64
  }
65
65
  if ((child.type === "aiAgentJobMCPTool" || child.type === "llmPromptMCPTool") &&
66
- (!child.config.condition || !!api.parseCognigyScriptCondition(child.config.condition))) {
66
+ (!child.config.condition || !!(yield api.parseCognigyScriptCondition(child.config.condition)))) {
67
67
  if (!child.config.mcpServerUrl) {
68
68
  throw new Error(`MCP Server URL is missing in Tool Node configuration.`);
69
69
  }