@cognigy/rest-api-client 2025.15.1 → 2025.17.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. package/CHANGELOG.md +15 -0
  2. package/build/apigroups/MetricsAPIGroup_2_0.js +10 -0
  3. package/build/apigroups/ResourcesAPIGroup_2_0.js +6 -0
  4. package/build/apigroups/SimulationAPIGroup_2_0.js +4 -0
  5. package/build/shared/charts/descriptors/data/debugMessage.js +13 -3
  6. package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +48 -49
  7. package/build/shared/charts/descriptors/logic/if/if.js +2 -2
  8. package/build/shared/charts/descriptors/logic/switch/switch.js +30 -21
  9. package/build/shared/charts/descriptors/message/question/question.js +3 -3
  10. package/build/shared/charts/descriptors/message/question/utils/validateQuestionAnswer.js +2 -2
  11. package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +31 -2
  12. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +40 -24
  13. package/build/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +4 -4
  14. package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +36 -21
  15. package/build/shared/charts/descriptors/transcripts/addTranscriptStep.js +3 -3
  16. package/build/shared/charts/descriptors/transcripts/getTranscript.js +23 -3
  17. package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +3 -0
  18. package/build/shared/generativeAI/getPrompt.js +75 -0
  19. package/build/shared/generativeAI/utils/generativeAIPrompts.js +613 -0
  20. package/build/shared/generativeAI/utils/prompts/contextAwareUserQueryRephrasing.js +84 -0
  21. package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +2 -0
  22. package/build/shared/interfaces/messageAPI/handover.js +6 -0
  23. package/build/shared/interfaces/resources/IGetAiAgentJobsTools.js +3 -0
  24. package/build/shared/interfaces/resources/IKnowledgeDescriptor.js +38 -5
  25. package/build/shared/interfaces/resources/ILargeLanguageModel.js +16 -1
  26. package/build/shared/interfaces/restAPI/metrics/callCounter/v3.0/ICallCounterPreAggregatedValue_3_0.js +3 -0
  27. package/build/shared/interfaces/restAPI/metrics/callCounter/v3.0/IGetCallCounterOrganisationRest_3_0.js +3 -0
  28. package/build/shared/interfaces/restAPI/metrics/callCounter/v3.0/IGetCallCounterRest_3_0.js +3 -0
  29. package/build/shared/interfaces/restAPI/metrics/callCounter/v3.0/index.js +3 -0
  30. package/build/shared/interfaces/restAPI/resources/aiAgent/v2.0/IAiAgentJobNodeWithTools_2_0.js +65 -0
  31. package/build/shared/interfaces/restAPI/resources/aiAgent/v2.0/IGetAiAgentJobAndToolsRest_2_0 .js +4 -0
  32. package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/IIndexKnowledgeDescriptorsRest_2_0.js +3 -0
  33. package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/extension/IRunKnowledgeExtensionRest_2_0.js +3 -0
  34. package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/index.js +16 -0
  35. package/build/shared/interfaces/restAPI/simulation/simulationRunBatch/IStopSimulationRunBatchRest_2_0.js +3 -0
  36. package/build/shared/interfaces/security/ICallCounterPreAggregatedValue.js +3 -0
  37. package/build/test.js +39 -0
  38. package/dist/esm/apigroups/MetricsAPIGroup_2_0.js +10 -0
  39. package/dist/esm/apigroups/ResourcesAPIGroup_2_0.js +6 -0
  40. package/dist/esm/apigroups/SimulationAPIGroup_2_0.js +4 -0
  41. package/dist/esm/shared/charts/descriptors/data/debugMessage.js +13 -3
  42. package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +48 -50
  43. package/dist/esm/shared/charts/descriptors/logic/if/if.js +2 -2
  44. package/dist/esm/shared/charts/descriptors/logic/switch/switch.js +30 -21
  45. package/dist/esm/shared/charts/descriptors/message/question/question.js +3 -3
  46. package/dist/esm/shared/charts/descriptors/message/question/utils/validateQuestionAnswer.js +4 -3
  47. package/dist/esm/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +29 -1
  48. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +39 -23
  49. package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +4 -4
  50. package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +43 -28
  51. package/dist/esm/shared/charts/descriptors/transcripts/addTranscriptStep.js +3 -3
  52. package/dist/esm/shared/charts/descriptors/transcripts/getTranscript.js +23 -3
  53. package/dist/esm/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +3 -0
  54. package/dist/esm/shared/generativeAI/getPrompt.js +68 -0
  55. package/dist/esm/shared/generativeAI/utils/generativeAIPrompts.js +610 -0
  56. package/dist/esm/shared/generativeAI/utils/prompts/contextAwareUserQueryRephrasing.js +81 -0
  57. package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +2 -0
  58. package/dist/esm/shared/interfaces/messageAPI/handover.js +6 -0
  59. package/dist/esm/shared/interfaces/resources/IGetAiAgentJobsTools.js +2 -0
  60. package/dist/esm/shared/interfaces/resources/IKnowledgeDescriptor.js +37 -5
  61. package/dist/esm/shared/interfaces/resources/ILargeLanguageModel.js +14 -0
  62. package/dist/esm/shared/interfaces/restAPI/management/authentication/ICreateJWTToken.js +1 -0
  63. package/dist/esm/shared/interfaces/restAPI/metrics/callCounter/v3.0/ICallCounterPreAggregatedValue_3_0.js +2 -0
  64. package/dist/esm/shared/interfaces/restAPI/metrics/callCounter/v3.0/IGetCallCounterOrganisationRest_3_0.js +2 -0
  65. package/dist/esm/shared/interfaces/restAPI/metrics/callCounter/v3.0/IGetCallCounterRest_3_0.js +2 -0
  66. package/dist/esm/shared/interfaces/restAPI/metrics/callCounter/v3.0/index.js +2 -0
  67. package/dist/esm/shared/interfaces/restAPI/resources/aiAgent/v2.0/IAiAgentJobNodeWithTools_2_0.js +65 -0
  68. package/dist/esm/shared/interfaces/restAPI/resources/aiAgent/v2.0/IGetAiAgentJobAndToolsRest_2_0 .js +3 -0
  69. package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/IIndexKnowledgeDescriptorsRest_2_0.js +2 -0
  70. package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/extension/IRunKnowledgeExtensionRest_2_0.js +2 -0
  71. package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/index.js +2 -1
  72. package/dist/esm/shared/interfaces/restAPI/simulation/simulationRunBatch/IStopSimulationRunBatchRest_2_0.js +2 -0
  73. package/dist/esm/shared/interfaces/security/ICallCounterPreAggregatedValue.js +2 -0
  74. package/dist/esm/test.js +39 -0
  75. package/package.json +1 -1
  76. package/types/index.d.ts +299 -42
@@ -388,6 +388,13 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  ]
  }
  },
+ {
+ key: "useTextAlternativeForLLM",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__INCLUDE_ALL_OUTPUT_TYPES__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__INCLUDE_ALL_OUTPUT_TYPES__DESCRIPTION",
+ defaultValue: true,
+ },
  {
  key: "customModelOptions",
  label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_MODEL_OPTIONS__LABEL",
@@ -539,7 +546,8 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  "frequencyPenalty",
  "useStop",
  "stop",
- "seed"
+ "seed",
+ "useTextAlternativeForLLM",
  ]
  },
  {
@@ -625,10 +633,10 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  },
  tags: ["ai", "llm", "gpt", "generative ai", "openai", "azure", "prompt"],
  function: ({ cognigy, config, childConfigs, nodeId }) => __awaiter(void 0, void 0, void 0, function* () {
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
  const { api, input, flowReferenceId } = cognigy;
  const { temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, streamStopTokens, streamStopTokenOverrides, debugLogTokenCount, debugLogRequestAndCompletion, debugLogLLMLatency, debugLogToolDefinitions, llmProviderReferenceId, usePromptMode, chatTranscriptSteps, responseFormat, streamStoreCopyInInput, seed, immediateOutput, customModelOptions, customRequestOptions, errorHandling = "continue", // default behavior for LLM Prompt node was, continue its execution even though an error occurred (deviating it from the SEO node) & do not output an error message on UI explicitly. However, error is always stored in the input or context object. We can use an extra "say" node to output it.
- errorHandlingGotoTarget, errorMessage, logErrorToSystem, processImages, transcriptImageHandling, toolChoice, useStrict } = config;
+ errorHandlingGotoTarget, errorMessage, useTextAlternativeForLLM, logErrorToSystem, processImages, transcriptImageHandling, toolChoice, useStrict } = config;
  let prompt = config.prompt || "";
  const { traceId } = input;
  // check if custom variables are used and if they have a length modifier
@@ -653,17 +661,17 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  }
  // handle errors from external services, depending on the settings
  const handleServiceError = (error) => __awaiter(void 0, void 0, void 0, function* () {
- var _w, _x, _y, _z, _0, _1;
+ var _y, _z, _0, _1, _2, _3;
  const compactError = {
  name: error === null || error === void 0 ? void 0 : error.name,
  code: error === null || error === void 0 ? void 0 : error.code,
  message: (error === null || error === void 0 ? void 0 : error.message) || error
  };
  // return the requestId if it exist in the error obj.
- if ((_w = error === null || error === void 0 ? void 0 : error.meta) === null || _w === void 0 ? void 0 : _w.requestId) {
- compactError["requestId"] = (_x = error === null || error === void 0 ? void 0 : error.meta) === null || _x === void 0 ? void 0 : _x.requestId;
+ if ((_y = error === null || error === void 0 ? void 0 : error.meta) === null || _y === void 0 ? void 0 : _y.requestId) {
+ compactError["requestId"] = (_z = error === null || error === void 0 ? void 0 : error.meta) === null || _z === void 0 ? void 0 : _z.requestId;
  }
- if ((_y = error === null || error === void 0 ? void 0 : error.originalErrorDetails) === null || _y === void 0 ? void 0 : _y.code) {
+ if ((_0 = error === null || error === void 0 ? void 0 : error.originalErrorDetails) === null || _0 === void 0 ? void 0 : _0.code) {
  compactError.code = error.originalErrorDetails.code;
  }
  const errorResponse = {
@@ -672,7 +680,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  // add error to context or input
  switch (storeLocation) {
  case "context":
- (_z = api.addToContext) === null || _z === void 0 ? void 0 : _z.call(api, contextKey, errorResponse, "simple");
+ (_1 = api.addToContext) === null || _1 === void 0 ? void 0 : _1.call(api, contextKey, errorResponse, "simple");
  break;
  default:
  api.addToInput(inputKey, errorResponse);
@@ -680,7 +688,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  if (errorHandling === "continue") {
  // output the timeout message
  if (errorMessage) {
- yield ((_0 = api.output) === null || _0 === void 0 ? void 0 : _0.call(api, errorMessage, null));
+ yield ((_2 = api.output) === null || _2 === void 0 ? void 0 : _2.call(api, errorMessage, null));
  }
  // Continue with default node as next node
  const defaultChild = childConfigs === null || childConfigs === void 0 ? void 0 : childConfigs.find(child => child.type === "llmPromptDefault");
@@ -707,7 +715,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  absorbContext: false
  }
  };
- yield ((_1 = GO_TO.function) === null || _1 === void 0 ? void 0 : _1.call(GO_TO, gotoParams));
+ yield ((_3 = GO_TO.function) === null || _3 === void 0 ? void 0 : _3.call(GO_TO, gotoParams));
  }
  else {
  throw new InternalServerError(error === null || error === void 0 ? void 0 : error.message, { traceId });
@@ -773,7 +781,8 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  const transcript = yield api.getTranscript({
  limit: chatTranscriptSteps || 50,
  rolesWhiteList: [TranscriptRole.USER, TranscriptRole.ASSISTANT, TranscriptRole.TOOL],
- excludeDataOnlyMessagesFilter: [TranscriptRole.ASSISTANT]
+ excludeDataOnlyMessagesFilter: [TranscriptRole.ASSISTANT],
+ useTextAlternativeForLLM,
  });
  llmPromptOptions["transcript"] = transcript;
  llmPromptOptions["chat"] = [{
@@ -817,14 +826,20 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  const mainToolCall = llmResult.toolCalls[0];
  let isMcpToolCall = false;
  // Find the child node with the toolId of the tool call
- let toolChild = childConfigs.find(child => { var _a, _b; return child.type === "llmPromptTool" && ((_a = child.config) === null || _a === void 0 ? void 0 : _a.toolId) && api.parseCognigyScriptText((_b = child.config) === null || _b === void 0 ? void 0 : _b.toolId) === mainToolCall.function.name; });
+ let toolChild = undefined;
+ for (const child of childConfigs) {
+ if (child.type === "llmPromptTool" && ((_e = child.config) === null || _e === void 0 ? void 0 : _e.toolId) && (yield api.parseCognigyScriptText((_f = child.config) === null || _f === void 0 ? void 0 : _f.toolId)) === mainToolCall.function.name) {
+ toolChild = child;
+ break;
+ }
+ }
  if (!toolChild && toolMap.has(mainToolCall.function.name)) {
  // If the tool call is from an MCP tool, set the next node to the corresponding child node
  toolChild = childConfigs.find(child => child.id === toolMap.get(mainToolCall.function.name));
  isMcpToolCall = true;
  }
  if (mainToolCall.function.name !== "retrieve_knowledge" && toolChild === undefined) {
- (_e = api.logDebugError) === null || _e === void 0 ? void 0 : _e.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
+ (_g = api.logDebugError) === null || _g === void 0 ? void 0 : _g.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
  }
  // Add last tool call to session state for loading it from Tool Answer Node
  api.updateSessionStateValues({
@@ -832,20 +847,20 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  flow: flowReferenceId,
  node: nodeId,
  } }, (isMcpToolCall && {
- mcpServerUrl: (_f = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _f === void 0 ? void 0 : _f.mcpServerUrl,
- timeout: (_g = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _g === void 0 ? void 0 : _g.timeout,
+ mcpServerUrl: (_h = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _h === void 0 ? void 0 : _h.mcpServerUrl,
+ timeout: (_j = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _j === void 0 ? void 0 : _j.timeout,
  mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
  })), { toolCall: mainToolCall }),
  });
  // if there are any parameters/arguments, add them to the input slots
  if (mainToolCall.function.arguments) {
- input.llmPrompt = Object.assign(Object.assign({}, input.llmPrompt), { toolArgs: Object.assign(Object.assign({}, (_j = (_h = input.llmPrompt) === null || _h === void 0 ? void 0 : _h.toolArgs) !== null && _j !== void 0 ? _j : {}), mainToolCall.function.arguments) });
+ input.llmPrompt = Object.assign(Object.assign({}, input.llmPrompt), { toolArgs: Object.assign(Object.assign({}, (_l = (_k = input.llmPrompt) === null || _k === void 0 ? void 0 : _k.toolArgs) !== null && _l !== void 0 ? _l : {}), mainToolCall.function.arguments) });
  }
  // Debug Message for Tool Calls, configured in the Tool Node
- if ((_k = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _k === void 0 ? void 0 : _k.debugMessage) {
- const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${api.parseCognigyScriptText(toolChild.config.toolId)}`];
+ if ((_m = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _m === void 0 ? void 0 : _m.debugMessage) {
+ const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${yield api.parseCognigyScriptText(toolChild.config.toolId)}`];
  // Arguments / Parameters Slots
- const slots = ((_l = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _l === void 0 ? void 0 : _l.arguments) && Object.keys(mainToolCall.function.arguments);
+ const slots = ((_o = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _o === void 0 ? void 0 : _o.arguments) && Object.keys(mainToolCall.function.arguments);
  const hasSlots = slots && slots.length > 0;
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
  if (hasSlots) {
@@ -860,7 +875,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  messageLines.push(`- ${slot}: ${slotValueAsString}`);
  });
  }
- (_m = api.logDebugMessage) === null || _m === void 0 ? void 0 : _m.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
+ (_p = api.logDebugMessage) === null || _p === void 0 ? void 0 : _p.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
  }
  if (toolChild) {
  api.setNextNode(toolChild.id);
@@ -885,11 +900,11 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  // we stringify objects (e.g. results coming from JSON Mode)
  // so that the transcript only contains text
  const resultToOutput = typeof ((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult) === "object" ? JSON.stringify((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult, undefined, 2) : (llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult;
- yield ((_o = api.output) === null || _o === void 0 ? void 0 : _o.call(api, resultToOutput, {}));
+ yield ((_q = api.output) === null || _q === void 0 ? void 0 : _q.call(api, resultToOutput, {}));
  }
  else if (llmResult.finishReason && llmPromptOptions.stream) {
  // send the finishReason as last output for a stream
- (_p = api.output) === null || _p === void 0 ? void 0 : _p.call(api, "", {
+ (_r = api.output) === null || _r === void 0 ? void 0 : _r.call(api, "", {
  _cognigy: {
  _preventTranscript: true,
  _messageId,
@@ -912,7 +927,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  }
  // Add response to Cognigy Input/Context for further usage
  if (storeLocation === "context") {
- (_q = api.addToContext) === null || _q === void 0 ? void 0 : _q.call(api, contextKey, llmResult, "simple");
+ (_s = api.addToContext) === null || _s === void 0 ? void 0 : _s.call(api, contextKey, llmResult, "simple");
  }
  else if (storeLocation === "input") {
  api.addToInput(inputKey, llmResult);
@@ -925,19 +940,19 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  const errorDetailsBase = {
  name: error === null || error === void 0 ? void 0 : error.name,
  code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
- message: (error === null || error === void 0 ? void 0 : error.message) || ((_r = error.originalErrorDetails) === null || _r === void 0 ? void 0 : _r.message),
+ message: (error === null || error === void 0 ? void 0 : error.message) || ((_t = error.originalErrorDetails) === null || _t === void 0 ? void 0 : _t.message),
  };
  const errorDetails = Object.assign(Object.assign({}, errorDetailsBase), { originalErrorDetails: error === null || error === void 0 ? void 0 : error.originalErrorDetails });
  // return the requestId if it exist in the error obj.
- if ((_s = error.meta) === null || _s === void 0 ? void 0 : _s.requestId) {
+ if ((_u = error.meta) === null || _u === void 0 ? void 0 : _u.requestId) {
  errorDetails["meta"] = {
- requestId: (_t = error.meta) === null || _t === void 0 ? void 0 : _t.requestId
+ requestId: (_v = error.meta) === null || _v === void 0 ? void 0 : _v.requestId
  };
  }
  if (logErrorToSystem) {
- (_u = api.log) === null || _u === void 0 ? void 0 : _u.call(api, "error", JSON.stringify(errorDetailsBase));
+ (_w = api.log) === null || _w === void 0 ? void 0 : _w.call(api, "error", JSON.stringify(errorDetailsBase));
  }
- (_v = api.logDebugError) === null || _v === void 0 ? void 0 : _v.call(api, errorDetailsBase, "UI__DEBUG_MODE__LLM_PROMPT__ERROR");
+ (_x = api.logDebugError) === null || _x === void 0 ? void 0 : _x.call(api, errorDetailsBase, "UI__DEBUG_MODE__LLM_PROMPT__ERROR");
  yield handleServiceError(errorDetails);
  return;
  }
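Taken together, the LLM Prompt v2 hunks above add one new toggle, useTextAlternativeForLLM (default true), expose it in the node's field list, destructure it from config, and forward it to api.getTranscript. The following TypeScript sketch summarizes that data flow; the option and field names come from the diff, while the surrounding types and the api stub are simplified placeholders rather than the actual SDK signatures.

// Sketch only: simplified stand-ins for the Cognigy types involved.
interface GetTranscriptOptions {
    limit: number;
    rolesWhiteList?: string[];
    excludeDataOnlyMessagesFilter?: string[];
    useTextAlternativeForLLM?: boolean; // new option threaded through in 2025.17.0
}

interface TranscriptStep { role: string; text: string; }

// Hypothetical stub for cognigy.api.getTranscript
const api = {
    getTranscript: async (options: GetTranscriptOptions): Promise<TranscriptStep[]> => {
        console.log("getTranscript called with", options);
        return [];
    },
};

// Inside the node function, the toggle is destructured from config
// (defaultValue: true in the node editor) and forwarded unchanged.
// "user"/"assistant"/"tool" stand in for the TranscriptRole enum values, which the diff does not show.
async function buildTranscriptForPrompt(config: { chatTranscriptSteps?: number; useTextAlternativeForLLM?: boolean }) {
    const { chatTranscriptSteps, useTextAlternativeForLLM } = config;
    return api.getTranscript({
        limit: chatTranscriptSteps || 50,
        rolesWhiteList: ["user", "assistant", "tool"],
        excludeDataOnlyMessagesFilter: ["assistant"],
        useTextAlternativeForLLM,
    });
}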
@@ -326,7 +326,7 @@ export const ADD_TRANSCRIPT_STEP = createNodeDescriptor({
  },
  tags: ["service", "transcripts"],
  function: ({ cognigy, config }) => __awaiter(void 0, void 0, void 0, function* () {
- const { role, agentType, text, data, name, id, input, toolCallId, content, header, message, metadata } = config;
+ const { role, text, data, name, id, input, toolCallId, assistantType, content, header, message, metadata } = config;
  const { api } = cognigy;
  let log = null;
  switch (role) {
@@ -342,7 +342,7 @@ export const ADD_TRANSCRIPT_STEP = createNodeDescriptor({
  };
  break;
  case TranscriptRole.ASSISTANT:
- if (agentType === TranscriptEntryType.OUTPUT) {
+ if (assistantType === TranscriptEntryType.OUTPUT) {
  log = {
  role: TranscriptRole.ASSISTANT,
  type: TranscriptEntryType.OUTPUT,
@@ -353,7 +353,7 @@ export const ADD_TRANSCRIPT_STEP = createNodeDescriptor({
  }
  };
  }
- else if (agentType === TranscriptEntryType.TOOL_CALL) {
+ else if (assistantType === TranscriptEntryType.TOOL_CALL) {
  log = {
  role: TranscriptRole.ASSISTANT,
  type: TranscriptEntryType.TOOL_CALL,
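The Add Transcript Step hunks are a pure rename: the assistant-branch discriminator is now read from assistantType instead of agentType. A minimal sketch of the affected config shape follows; the enum string values are placeholders, since the diff only shows the TranscriptEntryType member names.

// Hedged illustration of the renamed key; not the full node config.
type EntryType = "output" | "toolCall"; // placeholder values for TranscriptEntryType.OUTPUT / TOOL_CALL

interface AddTranscriptStepConfig {
    role: "user" | "assistant" | "tool";
    assistantType?: EntryType; // was `agentType` before 2025.17.0
    text?: string;
}

function describeEntry(config: AddTranscriptStepConfig): string {
    if (config.role !== "assistant") return config.role;
    return config.assistantType === "toolCall" ? "assistant tool call" : "assistant output";
}

console.log(describeEntry({ role: "assistant", assistantType: "toolCall" }));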
@@ -68,6 +68,13 @@ export const GET_TRANSCRIPT = createNodeDescriptor({
  value: "context",
  }
  },
+ {
+ key: "includeTextAlternativeInTranscript",
+ type: "toggle",
+ label: "UI__NODE_EDITOR__GET_TRANSCRIPT__FIELDS__INCLUDE_TEXT_ALTERNATIVE_IN_TRANSCRIPT__LABEL",
+ description: "UI__NODE_EDITOR__GET_TRANSCRIPT__FIELDS__INCLUDE_TEXT_ALTERNATIVE_IN_TRANSCRIPT__DESCRIPTION",
+ defaultValue: true,
+ },
  ],
  sections: [
  {
@@ -79,18 +86,31 @@ export const GET_TRANSCRIPT = createNodeDescriptor({
  "inputKey",
  "contextKey",
  ]
- }
+ },
+ {
+ key: "advanced",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__ADVANCED__LABEL",
+ defaultCollapsed: true,
+ fields: [
+ "includeTextAlternativeInTranscript",
+ ],
+ },
  ],
  form: [
  { type: "field", key: "limit" },
  { type: "section", key: "storage" },
+ { type: "section", key: "advanced" },
  ],
  tags: ["service", "transcripts"],
  function: ({ cognigy, config }) => __awaiter(void 0, void 0, void 0, function* () {
  var _a;
- const { limit, storeLocation, inputKey, contextKey } = config;
+ const { limit, storeLocation, inputKey, contextKey, includeTextAlternativeInTranscript } = config;
  const { api } = cognigy;
- const transcript = yield api.getTranscript({ limit, excludeDataOnlyMessagesFilter: [TranscriptRole.AGENT, TranscriptRole.ASSISTANT] });
+ const transcript = yield api.getTranscript({
+ limit,
+ excludeDataOnlyMessagesFilter: [TranscriptRole.AGENT],
+ includeTextAlternativeInTranscript,
+ });
  if (storeLocation === "context") {
  (_a = api.addToContext) === null || _a === void 0 ? void 0 : _a.call(api, contextKey, transcript, "simple");
  }
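Two things change for the Get Transcript node: the new advanced toggle includeTextAlternativeInTranscript (default true) is forwarded to api.getTranscript, and TranscriptRole.ASSISTANT is dropped from excludeDataOnlyMessagesFilter, so data-only assistant messages are no longer filtered out. A minimal sketch of the resulting call follows, using a hypothetical stub in place of the real API and assuming "agent" as the TranscriptRole.AGENT value.

// Hypothetical stub mirroring the option names used in the diff above.
interface GetTranscriptCallOptions {
    limit: number;
    excludeDataOnlyMessagesFilter?: string[];
    includeTextAlternativeInTranscript?: boolean; // new in 2025.17.0
}

async function getTranscript(options: GetTranscriptCallOptions): Promise<unknown[]> {
    console.log("options:", options);
    return [];
}

void (async () => {
    // Before: excludeDataOnlyMessagesFilter listed both the agent and assistant roles.
    // After (as in the diff): only the agent role is excluded and the toggle is passed through.
    await getTranscript({
        limit: 25,
        excludeDataOnlyMessagesFilter: ["agent"],
        includeTextAlternativeInTranscript: true,
    });
})();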
@@ -143,6 +143,9 @@ class SessionConfigMapper extends BaseMapper {
  const timeout = Number(spAsrTimeout || asrTimeout);
  recognizer.asrTimeout = timeout / 1000 || undefined;
  }
+ else if (asrEnabled === false || spAsrEnabled === false) {
+ recognizer.asrTimeout = 0;
+ }
  return recognizer;
  }
  isDtmfEnabled(sessionParams, dtmf) {
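The session-config mapper change is small but easy to miss: when ASR is explicitly disabled (asrEnabled === false or spAsrEnabled === false) and no timeout applies, asrTimeout is now forced to 0 instead of being left unset. A hedged reconstruction follows; the guard on the existing timeout branch is not visible in the hunk and is assumed here.

// Simplified reconstruction of the recognizer mapping; the real mapper sets more fields.
interface RecognizerConfig { asrTimeout?: number; }

function mapAsrTimeout(
    asrEnabled?: boolean,
    asrTimeout?: number,      // milliseconds
    spAsrEnabled?: boolean,
    spAsrTimeout?: number,
): RecognizerConfig {
    const recognizer: RecognizerConfig = {};
    if (spAsrTimeout || asrTimeout) {
        // Assumed guard: the hunk only shows the body of this branch.
        const timeout = Number(spAsrTimeout || asrTimeout);
        recognizer.asrTimeout = timeout / 1000 || undefined; // seconds
    }
    else if (asrEnabled === false || spAsrEnabled === false) {
        // New in 2025.17.0: explicitly disabled ASR maps to a zero timeout.
        recognizer.asrTimeout = 0;
    }
    return recognizer;
}

console.log(mapAsrTimeout(false));      // { asrTimeout: 0 }
console.log(mapAsrTimeout(true, 4000)); // { asrTimeout: 4 }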
@@ -0,0 +1,68 @@
+ /** Custom Modules */
+ import { generativeAIPrompts } from "./utils/generativeAIPrompts";
+ import { InternalServerError } from "../errors/internalServerError";
+ export const isCompletionPrompt = (data) => {
+ return typeof data === "object" && data !== null && "prompt" in data;
+ };
+ export const isChatPrompt = (data) => {
+ return typeof data === "object" && data !== null && "messages" in data;
+ };
+ export const isOpenAIChatPrompt = (data) => {
+ return Array.isArray(data) &&
+ data.every((item) => typeof item === "object" &&
+ item !== null &&
+ ("role" in item) &&
+ ("content" in item) &&
+ (item.role === "system" || item.role === "user" || item.role === "assistant") &&
+ (typeof item.content === "string"));
+ };
+ /**
+ * Gets the current prompts for the passed model/useCase
+ * @param model - The model to get the prompt for
+ * @param useCase - The use case to get the prompt for
+ * @param subUseCase - Optional sub-use case to get a specific prompt
+ * @param promptParser - Optional function to modify the prompt before returning it
+ * @returns {TALLPrompts}
+ */
+ export const getPrompt = (model, useCase, subUseCase, promptParser) => {
+ var _a;
+ const loggerMeta = {
+ module: "getPrompt.ts",
+ label: "generativeAI",
+ function: "getPrompt",
+ model,
+ useCase,
+ subUseCase
+ };
+ let modelPrompts = (_a = generativeAIPrompts[`${model}`]) !== null && _a !== void 0 ? _a : generativeAIPrompts["default"];
+ if (!modelPrompts) {
+ throw new InternalServerError(`Neither the model "${model}" nor the default fallback have predefined prompts`, undefined, loggerMeta);
+ }
+ let prompt = modelPrompts[`${useCase}`];
+ // generativeAIPrompts[model] has no prompt for use case, so try to fallback to default prompt
+ if (!prompt) {
+ modelPrompts = generativeAIPrompts["default"];
+ if (!modelPrompts) {
+ throw new InternalServerError(`The default fallback has no predefined prompts`, undefined, loggerMeta);
+ }
+ prompt = modelPrompts[`${useCase}`];
+ }
+ if (!prompt) {
+ throw new InternalServerError(`Neither the model "${model}" nor the default fallback define a prompt for useCase "${useCase}"`, undefined, loggerMeta);
+ }
+ if (subUseCase && prompt && typeof prompt === "object" && `${subUseCase}` in prompt) {
+ prompt = prompt[`${subUseCase}`];
+ }
+ if (!prompt) {
+ throw new InternalServerError(`The prompt defined for the model "${model}" or the default fallback, useCase "${useCase}", and subUseCase "${subUseCase}" is invalid`, undefined, loggerMeta);
+ }
+ try {
+ return promptParser
+ ? promptParser(JSON.parse(JSON.stringify(prompt)))
+ : JSON.parse(JSON.stringify(prompt));
+ }
+ catch (error) {
+ throw new InternalServerError(`Error while parsing prompt for model: ${model} and useCase: ${useCase} and subUseCase: ${subUseCase}`, undefined, Object.assign({ originalError: error }, loggerMeta));
+ }
+ };
+ //# sourceMappingURL=getPrompt.js.map
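The new getPrompt helper resolves a prompt template in stages: look up the model-specific entry in generativeAIPrompts, fall back to the "default" entry when the model or the use case is missing, optionally descend into a subUseCase, and finally return a deep copy (optionally transformed by promptParser). The following self-contained sketch mirrors that fallback order; the prompt table, model name, and use-case key are made up for illustration and are not keys guaranteed to exist in generativeAIPrompts.

// Sketch of the fallback order implemented by getPrompt above (illustrative data only).
type Prompt = { messages: Array<{ role: string; content: string }> } | { prompt: string };

const prompts: Record<string, Record<string, Prompt>> = {
    default: {
        rephraseQuery: { messages: [{ role: "system", content: "Rephrase the user query." }] },
    },
    "example-model": {
        rephraseQuery: { prompt: "Rephrase: {{query}}" },
    },
};

function resolvePrompt(model: string, useCase: string): Prompt {
    // 1) model-specific table, 2) "default" table, 3) error if neither defines the use case.
    const table = prompts[model] ?? prompts["default"];
    const prompt = table?.[useCase] ?? prompts["default"]?.[useCase];
    if (!prompt) {
        throw new Error(`No prompt defined for useCase "${useCase}"`);
    }
    // Return a deep copy, as getPrompt does, so callers can mutate the result safely.
    return JSON.parse(JSON.stringify(prompt));
}

console.log(resolvePrompt("example-model", "rephraseQuery")); // model-specific prompt
console.log(resolvePrompt("unknown-model", "rephraseQuery")); // falls back to "default"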