@cognigy/rest-api-client 2025.13.0 → 2025.15.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/CHANGELOG.md +10 -0
  2. package/build/apigroups/ResourcesAPIGroup_2_0.js +4 -1
  3. package/build/apigroups/SimulationAPIGroup_2_0.js +5 -1
  4. package/build/shared/charts/descriptors/connectionNodes/internalStorageProviders/amazonStorageProviderConnection.js +7 -1
  5. package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +31 -4
  6. package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +11 -3
  7. package/build/shared/charts/descriptors/service/GPTPrompt.js +31 -0
  8. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +58 -21
  9. package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +58 -21
  10. package/build/shared/helper/BaseContext.js +3 -1
  11. package/build/shared/interfaces/messageAPI/handover.js +6 -0
  12. package/build/shared/interfaces/resources/IAiAgent.js +1 -1
  13. package/build/shared/interfaces/resources/IExtension.js +12 -13
  14. package/build/shared/interfaces/resources/IKnowledgeDescriptor.js +45 -0
  15. package/build/shared/interfaces/resources/ISimulation.js +9 -0
  16. package/build/shared/interfaces/resources/TResourceType.js +3 -0
  17. package/build/shared/interfaces/restAPI/resources/chart/v2.0/IReadFlowChartAiAgentsRest_2_0.js +3 -0
  18. package/build/shared/interfaces/restAPI/simulation/persona/IGenerateBulkPersonaRest_2_0.js +3 -0
  19. package/build/shared/interfaces/restAPI/simulation/persona/IGeneratePersonaRest_2_0.js +3 -0
  20. package/build/shared/interfaces/restAPI/simulation/persona/IGetPersonaOptionsRest_2_0.js +3 -0
  21. package/build/shared/interfaces/restAPI/simulation/persona/IRegeneratePersonaFieldRest_2_0.js +3 -0
  22. package/build/shared/interfaces/restAPI/simulation/simulationRun/ISimulationRunRest_2_0.js +6 -1
  23. package/build/shared/interfaces/security/IPermission.js +2 -0
  24. package/build/shared/interfaces/security/IRole.js +3 -0
  25. package/build/shared/interfaces/security/index.js +1 -1
  26. package/dist/esm/apigroups/ResourcesAPIGroup_2_0.js +4 -1
  27. package/dist/esm/apigroups/SimulationAPIGroup_2_0.js +5 -1
  28. package/dist/esm/shared/charts/descriptors/connectionNodes/internalStorageProviders/amazonStorageProviderConnection.js +7 -1
  29. package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +31 -4
  30. package/dist/esm/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +11 -3
  31. package/dist/esm/shared/charts/descriptors/service/GPTPrompt.js +31 -0
  32. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +58 -21
  33. package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +65 -28
  34. package/dist/esm/shared/helper/BaseContext.js +3 -1
  35. package/dist/esm/shared/interfaces/messageAPI/handover.js +6 -0
  36. package/dist/esm/shared/interfaces/resources/IAiAgent.js +1 -1
  37. package/dist/esm/shared/interfaces/resources/IExtension.js +12 -13
  38. package/dist/esm/shared/interfaces/resources/IKnowledgeDescriptor.js +42 -0
  39. package/dist/esm/shared/interfaces/resources/ISimulation.js +6 -0
  40. package/dist/esm/shared/interfaces/resources/TResourceType.js +3 -0
  41. package/dist/esm/shared/interfaces/restAPI/resources/chart/v2.0/IReadFlowChartAiAgentsRest_2_0.js +2 -0
  42. package/dist/esm/shared/interfaces/restAPI/simulation/persona/IGenerateBulkPersonaRest_2_0.js +2 -0
  43. package/dist/esm/shared/interfaces/restAPI/simulation/persona/IGeneratePersonaRest_2_0.js +2 -0
  44. package/dist/esm/shared/interfaces/restAPI/simulation/persona/IGetPersonaOptionsRest_2_0.js +2 -0
  45. package/dist/esm/shared/interfaces/restAPI/simulation/persona/IRegeneratePersonaFieldRest_2_0.js +2 -0
  46. package/dist/esm/shared/interfaces/restAPI/simulation/simulationRun/ISimulationRunRest_2_0.js +5 -0
  47. package/dist/esm/shared/interfaces/security/IPermission.js +2 -0
  48. package/dist/esm/shared/interfaces/security/IRole.js +3 -0
  49. package/dist/esm/shared/interfaces/security/index.js +1 -1
  50. package/package.json +1 -1
  51. package/types/index.d.ts +776 -433
package/CHANGELOG.md CHANGED
@@ -1,3 +1,13 @@
+ # 2025.15.0
+ Released: July 22nd, 2025
+
+ Released state of package up to date with Cognigy.AI v2025.15.0
+
+ # 2025.14.0
+ Released: July 08th, 2025
+
+ Released state of package up to date with Cognigy.AI v2025.14.0
+
  # 2025.13.0
  Released: June 24th, 2025
 
@@ -632,7 +632,10 @@ const ResourcesAPIGroup_2_0 = (instance) => {
  generateDesignTimeLLMOutput: (_a) => {
  var { projectId } = _a, args = __rest(_a, ["projectId"]);
  return (0, GenericAPIFn_1.GenericAPIFn)(`/new/v2.0/projects/${projectId}/generate-output/design-time-llm`, "POST", self)(args);
- }
+ },
+ readFlowChartAiAgents: ({ flowId, preferredLocaleId }, options) => (0, GenericAPIFn_1.GenericAPIFn)(`/new/v2.0/flows/${flowId}/chart/nodes/aiagents?${(0, query_1.stringifyQuery)({
+ preferredLocaleId
+ })}`, "GET", self)(undefined, options)
  };
  };
  exports.ResourcesAPIGroup_2_0 = ResourcesAPIGroup_2_0;
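
Note: the new readFlowChartAiAgents method issues a GET request to /new/v2.0/flows/{flowId}/chart/nodes/aiagents with preferredLocaleId serialized as a query parameter. A minimal usage sketch, assuming `client` is an already-initialized @cognigy/rest-api-client instance exposing this v2.0 resource method; the IDs below are placeholders:

    // List the AI Agent nodes referenced in a flow chart (hypothetical IDs)
    const aiAgents = await client.readFlowChartAiAgents({
        flowId: "my-flow-reference-id",
        preferredLocaleId: "my-locale-id", // optional, sent as a query parameter
    });
    console.log(aiAgents);
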
@@ -51,7 +51,11 @@ function SimulationAPIGroup_2_0(instance) {
  readSimulationRun: (_a, options) => {
  var { simulationReference, simulationRunBatchReference, simulationRunReference } = _a, args = __rest(_a, ["simulationReference", "simulationRunBatchReference", "simulationRunReference"]);
  return (0, GenericAPIFn_1.GenericAPIFn)(`/testing/beta/simulations/${simulationReference}/batches/${simulationRunBatchReference}/runs/${simulationRunReference}?${(0, query_1.stringifyQuery)(args)}`, "GET", self)(undefined, options);
- }
+ },
+ getPersonaOptions: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)("/testing/beta/personas/options", "POST", self)(args, options),
+ generatePersona: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)("/testing/beta/personas/generate", "POST", self)(args, options),
+ regeneratePersonaField: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)("/testing/beta/personas/regenerate-field", "POST", self)(args, options),
+ generateBulkPersona: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)("/testing/beta/personas/generate-bulk", "POST", self)(args, options)
  };
  }
  exports.SimulationAPIGroup_2_0 = SimulationAPIGroup_2_0;
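
Note: the simulation API group gains four persona endpoints, all POST requests under /testing/beta/personas. A minimal usage sketch, assuming `client` is an already-initialized @cognigy/rest-api-client instance exposing these methods; the request bodies are left as placeholders because their fields are defined in the corresponding I*PersonaRest_2_0 interfaces, not shown in this diff:

    // Fetch selectable persona options, then generate a single persona (hypothetical payloads)
    const personaOptions = await client.getPersonaOptions({ /* request body */ });
    const persona = await client.generatePersona({ /* request body */ });
    // Regenerate a single field of an existing persona, or generate many personas at once
    await client.regeneratePersonaField({ /* request body */ });
    await client.generateBulkPersona({ /* request body */ });
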
@@ -9,6 +9,12 @@ exports.AMAZON_STORAGE_PROVIDER_CONNECTION = {
  { fieldName: "secretAccessKey", label: "UI__CONNECTION_EDITOR__FIELD_SECRET_ACCESS_KEY" },
  { fieldName: "region", label: "UI__CONNECTION_EDITOR__FIELD_REGION" },
  { fieldName: "bucketName", label: "UI__CONNECTION_EDITOR__FIELD_BUCKET_NAME" },
- ]
+ {
+ fieldName: "customUrl",
+ label: "UI__CONNECTION_EDITOR__FIELD_CUSTOM_URL",
+ required: false,
+ description: "UI__CONNECTION_EDITOR__FIELD_CUSTOM_URL_AWS__DESCRIPTION"
+ },
+ ],
  };
  //# sourceMappingURL=amazonStorageProviderConnection.js.map
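
Note: the Amazon storage provider connection gains an optional customUrl field alongside the existing secretAccessKey, region, and bucketName fields. A sketch of connection values using only the fields visible in this hunk; all values are placeholders and the call that persists the connection is not part of this diff:

    // Hypothetical connection values for an S3-compatible endpoint
    const connectionValues = {
        secretAccessKey: "<secret>",                  // placeholder
        region: "eu-central-1",                       // placeholder
        bucketName: "my-bucket",                      // placeholder
        customUrl: "https://s3.internal.example.com", // new optional field
    };
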
@@ -641,10 +641,37 @@ New: `;
  message: (error === null || error === void 0 ? void 0 : error.message) || error,
  };
  api.logDebugError(JSON.stringify(compactError, undefined, 2), "Search Extract Output: Error");
- api.emitToOpsCenter({
- subComponent: "KnowledgeAIQueries",
- title: error === null || error === void 0 ? void 0 : error.message
- });
+ if (!(error instanceof errors_1.InternalServerError)) {
+ const metadata = api.getMetadata();
+ api.emitToOpsCenter({
+ projectId: metadata === null || metadata === void 0 ? void 0 : metadata.projectId,
+ subComponent: "KnowledgeAIQueries",
+ title: error === null || error === void 0 ? void 0 : error.message,
+ errorCode: "ERR_FLOW_006",
+ metadata: {
+ // extra metadata for the error is enriched in service-sentinel
+ snapshot: {
+ id: metadata === null || metadata === void 0 ? void 0 : metadata.snapshotId,
+ name: metadata === null || metadata === void 0 ? void 0 : metadata.snapshotName,
+ },
+ flow: {
+ referenceId: cognigy.flowReferenceId,
+ name: input.flowName,
+ },
+ node: {
+ referenceId: nodeId
+ },
+ knowledgeStore: {
+ referenceId: knowledgeStoreId,
+ },
+ locale: {
+ referenceId: metadata === null || metadata === void 0 ? void 0 : metadata.localeReferenceId,
+ name: metadata === null || metadata === void 0 ? void 0 : metadata.localeName,
+ },
+ },
+ isSnapshotError: !!(metadata === null || metadata === void 0 ? void 0 : metadata.snapshotId),
+ });
+ }
  if ((_a = error === null || error === void 0 ? void 0 : error.originalErrorDetails) === null || _a === void 0 ? void 0 : _a.code) {
  compactError["code"] = error.originalErrorDetails.code;
  }
@@ -133,8 +133,9 @@ exports.createLastUserInputString = createLastUserInputString;
  * @param debugLogTokenCount whether to log the token count
  * @param debugLogRequestAndCompletion whether to log the request and completion
  * @param cognigy the cognigy object (input, api, etc)
+ * @param nodeType the type of the node (optional)
  */
- const writeLLMDebugLogs = async (label, prompt, response, debugLogTokenCount, debugLogRequestAndCompletion, cognigy) => {
+ const writeLLMDebugLogs = async (label, prompt, response, debugLogTokenCount, debugLogRequestAndCompletion, cognigy, nodeType) => {
  var _a, _b, _c, _d;
  const { api, input } = cognigy;
  if (input.endpointType !== "adminconsole" && !api.getMetadata().isFollowSessionActive) {
@@ -151,14 +152,21 @@ const writeLLMDebugLogs = async (label, prompt, response, debugLogTokenCount, de
  if (debugLogTokenCount) {
  if (prompt) {
  const requestTokens = (_a = response === null || response === void 0 ? void 0 : response.tokenUsage) === null || _a === void 0 ? void 0 : _a.inputTokens;
- requestTokenMessage = ` (${requestTokens} Tokens)`;
+ requestTokenMessage = ` (${nodeType === "llmPromptV2" ? "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__FULL_REQUEST: " : ""}${requestTokens} Tokens)`;
  }
  if (response) {
  const completionTokens = (_b = response === null || response === void 0 ? void 0 : response.tokenUsage) === null || _b === void 0 ? void 0 : _b.outputTokens;
  completionTokenMessage = ` (${completionTokens} Tokens)`;
  }
  }
- api.logDebugMessage(`UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__REQUEST${requestTokenMessage}:<br>${prompt}<br><br>UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__COMPLETION${completionTokenMessage}:<br>${responseOutputFormatted}`, "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER");
+ let inputLabelKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__REQUEST";
+ let headerKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER";
+ if (nodeType === "llmPromptV2") {
+ inputLabelKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__SYSTEM_PROMPT";
+ headerKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER_WITH_SYSTEM_PROMPT";
+ }
+ ;
+ api.logDebugMessage(`${inputLabelKey}${requestTokenMessage}:<br>${prompt}<br><br>UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__COMPLETION${completionTokenMessage}:<br>${responseOutputFormatted}`, headerKey);
  }
  catch (err) { }
  }
@@ -587,6 +587,37 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
  const errorResponse = {
  error: compactError,
  };
+ if (!(error instanceof errors_1.InternalServerError)) {
+ const metadata = api.getMetadata();
+ api.emitToOpsCenter({
+ projectId: metadata === null || metadata === void 0 ? void 0 : metadata.projectId,
+ subComponent: "LargeLanguageModelCalls",
+ title: error === null || error === void 0 ? void 0 : error.message,
+ errorCode: "ERR_FLOW_005",
+ isSnapshotError: !!(metadata === null || metadata === void 0 ? void 0 : metadata.snapshotId),
+ metadata: {
+ // extra metadata for the error is enriched in service-sentinel
+ snapshot: {
+ id: metadata === null || metadata === void 0 ? void 0 : metadata.snapshotId,
+ name: metadata === null || metadata === void 0 ? void 0 : metadata.snapshotName,
+ },
+ flow: {
+ referenceId: cognigy.flowReferenceId,
+ name: input.flowName,
+ },
+ node: {
+ referenceId: nodeId
+ },
+ llm: {
+ referenceId: llmProviderReferenceId,
+ },
+ locale: {
+ referenceId: metadata === null || metadata === void 0 ? void 0 : metadata.localeReferenceId,
+ name: metadata === null || metadata === void 0 ? void 0 : metadata.localeName,
+ },
+ }
+ });
+ }
  // add error to context or input
  switch (storeLocation) {
  case "context":
@@ -512,6 +512,13 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_CONFIG__DESCRIPTION",
  defaultValue: true
  },
+ {
+ key: "debugLogLLMLatency",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_LLM_LATENCY__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_LLM_LATENCY__DESCRIPTION",
+ defaultValue: false
+ },
  {
  key: "storeLocation",
  type: "select",
@@ -831,7 +838,8 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  "debugResult",
  "debugLogTokenCount",
  "debugLogSystemPrompt",
- "debugLogToolDefinitions"
+ "debugLogToolDefinitions",
+ "debugLogLLMLatency"
  ],
  }
  ],
@@ -852,9 +860,9 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  ],
  tags: ["ai", "aiAgent"],
  function: async ({ cognigy, config, childConfigs, nodeId }) => {
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21;
  const { api, context, input, profile, flowReferenceId } = cognigy;
- const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
+ const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, debugLogLLMLatency, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
  try {
  if (!aiAgent) {
  throw new Error("Could not resolve AI Agent reference in AI Agent Node");
@@ -902,7 +910,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  throw new Error(`[VG2] Error on AI Agent Job node. Error message: ${error.message}`);
  }
  }
- const _21 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _21, cleanedProfile = __rest(_21, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
+ const _22 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _22, cleanedProfile = __rest(_22, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
  const userMemory = (0, getUserMemory_1.getUserMemory)(memoryType, selectedProfileFields, aiAgent, cleanedProfile);
  /**
  * ----- Knowledge Search Section -----
@@ -1152,6 +1160,10 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  var _a;
  text = isStreamingChannel ? text : text.trim();
  if (text) {
+ // Record first output time for debugging if not already recorded
+ if (debugLogLLMLatency && firstOutputTime === null) {
+ firstOutputTime = Date.now();
+ }
  // if we got text, we output it, but prevent it from being added to the transcript
  (_a = api.output) === null || _a === void 0 ? void 0 : _a.call(api, text, {
  _cognigy: {
@@ -1174,13 +1186,38 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  // Set understood to true so that an AI Agent interaction doesn't look false in our analytics
  (_1 = api.setAnalyticsData) === null || _1 === void 0 ? void 0 : _1.call(api, "understood", "true");
  input.understood = true;
+ // Start measuring LLM latency and time to first output if debug flag is enabled
+ const llmStartTime = debugLogLLMLatency ? Date.now() : 0;
+ let firstOutputTime = null;
  const fullLlmResult = await ((_2 = api.runGenerativeAIPrompt) === null || _2 === void 0 ? void 0 : _2.call(api, llmPromptOptions, "aiAgent"));
+ // End measuring times and log if debug flag is enabled
+ if (debugLogLLMLatency) {
+ const llmEndTime = Date.now();
+ const debugMessages = [];
+ const llmLatencyMs = llmEndTime - llmStartTime;
+ let timeToFirstOutputLabel;
+ if (fullLlmResult.finishReason === "tool_calls" && fullLlmResult.toolCalls.length > 0) {
+ timeToFirstOutputLabel = " - (tool call)";
+ }
+ else if (firstOutputTime === null) {
+ timeToFirstOutputLabel = " - (no output)";
+ }
+ else {
+ firstOutputTime = firstOutputTime || llmEndTime;
+ timeToFirstOutputLabel = `${firstOutputTime - llmStartTime}ms`;
+ }
+ if (storeLocation === "stream") {
+ debugMessages.push(`UI__DEBUG_MODE__AI_AGENT_JOB__TIME_TO_FIRST_OUTPUT__LABEL: ${timeToFirstOutputLabel}`);
+ }
+ debugMessages.push(`UI__DEBUG_MODE__AI_AGENT_JOB__LLM_LATENCY__LABEL: ${llmLatencyMs}ms`);
+ (_3 = api.logDebugMessage) === null || _3 === void 0 ? void 0 : _3.call(api, debugMessages.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TIMING__HEADER");
+ }
  const { messages } = fullLlmResult, llmResult = __rest(fullLlmResult, ["messages"]);
  const llmProvider = llmResult === null || llmResult === void 0 ? void 0 : llmResult.provider;
  const tokenUsage = fullLlmResult.tokenUsage;
  // Send optional debug message with token usage
  if (debugLogTokenCount && tokenUsage) {
- (_3 = api.logDebugMessage) === null || _3 === void 0 ? void 0 : _3.call(api, tokenUsage, "UI__DEBUG_MODE__AI_AGENT_JOB__TOKEN_USAGE__HEADER");
+ (_4 = api.logDebugMessage) === null || _4 === void 0 ? void 0 : _4.call(api, tokenUsage, "UI__DEBUG_MODE__AI_AGENT_JOB__TOKEN_USAGE__HEADER");
  }
  // Identify if the result is a tool call
  // If response is a tool call, set next node for Tools
@@ -1195,7 +1232,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  isMcpToolCall = true;
  }
  if (mainToolCall.function.name !== "retrieve_knowledge" && toolChild === undefined) {
- (_4 = api.logDebugError) === null || _4 === void 0 ? void 0 : _4.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
+ (_5 = api.logDebugError) === null || _5 === void 0 ? void 0 : _5.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
  }
  // Add last tool call to session state for loading it from Tool Answer Node
  api.updateSessionStateValues({
@@ -1203,21 +1240,21 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  flow: flowReferenceId,
  node: nodeId,
  } }, (isMcpToolCall && {
- mcpServerUrl: (_5 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _5 === void 0 ? void 0 : _5.mcpServerUrl,
- timeout: (_6 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _6 === void 0 ? void 0 : _6.timeout,
+ mcpServerUrl: (_6 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _6 === void 0 ? void 0 : _6.mcpServerUrl,
+ timeout: (_7 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _7 === void 0 ? void 0 : _7.timeout,
  mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
  })), { toolCall: mainToolCall }),
  });
  // if there are any parameters/arguments, add them to the input slots
  if (mainToolCall.function.arguments) {
- input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_8 = (_7 = input.aiAgent) === null || _7 === void 0 ? void 0 : _7.toolArgs) !== null && _8 !== void 0 ? _8 : {}), mainToolCall.function.arguments) });
+ input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_9 = (_8 = input.aiAgent) === null || _8 === void 0 ? void 0 : _8.toolArgs) !== null && _9 !== void 0 ? _9 : {}), mainToolCall.function.arguments) });
  }
  // Debug Message for Tool Calls, configured in the Tool Node
- if ((_9 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _9 === void 0 ? void 0 : _9.debugMessage) {
+ if ((_10 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _10 === void 0 ? void 0 : _10.debugMessage) {
  const toolId = isMcpToolCall ? mainToolCall.function.name : api.parseCognigyScriptText(toolChild.config.toolId);
  const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${toolId}`];
  // Arguments / Parameters Slots
- const slots = ((_10 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _10 === void 0 ? void 0 : _10.arguments) && Object.keys(mainToolCall.function.arguments);
+ const slots = ((_11 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _11 === void 0 ? void 0 : _11.arguments) && Object.keys(mainToolCall.function.arguments);
  const hasSlots = slots && slots.length > 0;
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
  if (hasSlots) {
@@ -1232,7 +1269,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  messageLines.push(`- ${slot}: ${slotValueAsString}`);
  });
  }
- (_11 = api.logDebugMessage) === null || _11 === void 0 ? void 0 : _11.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
+ (_12 = api.logDebugMessage) === null || _12 === void 0 ? void 0 : _12.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
  }
  if (toolChild) {
  api.setNextNode(toolChild.id);
@@ -1257,11 +1294,11 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  }
  // Optionally output the result immediately
  if (llmResult.result && outputImmediately && !llmPromptOptions.stream) {
- await ((_12 = api.output) === null || _12 === void 0 ? void 0 : _12.call(api, llmResult.result, {}));
+ await ((_13 = api.output) === null || _13 === void 0 ? void 0 : _13.call(api, llmResult.result, {}));
  }
  else if (llmResult.finishReason && llmPromptOptions.stream) {
  // send the finishReason as last output for a stream
- (_13 = api.output) === null || _13 === void 0 ? void 0 : _13.call(api, "", {
+ (_14 = api.output) === null || _14 === void 0 ? void 0 : _14.call(api, "", {
  _cognigy: {
  _preventTranscript: true,
  _messageId,
@@ -1284,7 +1321,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  }
  // Add response to Cognigy Input/Context for further usage
  if (storeLocation === "context") {
- (_14 = api.addToContext) === null || _14 === void 0 ? void 0 : _14.call(api, contextKey, llmResult, "simple");
+ (_15 = api.addToContext) === null || _15 === void 0 ? void 0 : _15.call(api, contextKey, llmResult, "simple");
  }
  else if (storeLocation === "input") {
  api.addToInput(inputKey, llmResult);
@@ -1297,14 +1334,14 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  const errorDetails = {
  name: (error === null || error === void 0 ? void 0 : error.name) || "Error",
  code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
- message: (error === null || error === void 0 ? void 0 : error.message) || ((_15 = error.originalErrorDetails) === null || _15 === void 0 ? void 0 : _15.message),
+ message: (error === null || error === void 0 ? void 0 : error.message) || ((_16 = error.originalErrorDetails) === null || _16 === void 0 ? void 0 : _16.message),
  };
- (_16 = api.emitEvent) === null || _16 === void 0 ? void 0 : _16.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
+ (_17 = api.emitEvent) === null || _17 === void 0 ? void 0 : _17.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
  if (logErrorToSystem) {
- (_17 = api.log) === null || _17 === void 0 ? void 0 : _17.call(api, "error", JSON.stringify(errorDetails));
+ (_18 = api.log) === null || _18 === void 0 ? void 0 : _18.call(api, "error", JSON.stringify(errorDetails));
  }
  if (errorHandling !== "stop") {
- (_18 = api.logDebugError) === null || _18 === void 0 ? void 0 : _18.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
+ (_19 = api.logDebugError) === null || _19 === void 0 ? void 0 : _19.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
  }
  if (storeErrorInInput) {
  input.aiAgent = input.aiAgent || {};
@@ -1313,7 +1350,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  if (errorHandling === "continue") {
  // output the timeout message
  if (errorMessage) {
- await ((_19 = api.output) === null || _19 === void 0 ? void 0 : _19.call(api, errorMessage, null));
+ await ((_20 = api.output) === null || _20 === void 0 ? void 0 : _20.call(api, errorMessage, null));
  }
  // Set default node as next node
  const defaultChild = childConfigs.find(child => child.type === "aiAgentJobDefault");
@@ -1325,7 +1362,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
  if (!errorHandlingGotoTarget) {
  throw new Error("GoTo Target is required");
  }
- if (!((_20 = api.checkThink) === null || _20 === void 0 ? void 0 : _20.call(api, nodeId))) {
+ if (!((_21 = api.checkThink) === null || _21 === void 0 ? void 0 : _21.call(api, nodeId))) {
  api.resetNextNodes();
  await api.executeFlow({
  flowNode: {
@@ -331,9 +331,16 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
  },
  {
  key: "debugLogRequestAndCompletion",
- label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUGLOGREQUESTANDCOMPLETION__LABEL",
+ label: "UI__NODE_EDITOR__SERVICE__LLM_PROMPT__FIELDS__LOG_SYSTEM_PROMPT_AND_COMPLETION__LABEL",
  type: "toggle",
- description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUGLOGREQUESTANDCOMPLETION__DESCRIPTION",
+ description: "UI__NODE_EDITOR__SERVICE__LLM_PROMPT__FIELDS__LOG_SYSTEM_PROMPT_AND_COMPLETION__DESCRIPTION",
+ defaultValue: false
+ },
+ {
+ key: "debugLogLLMLatency",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_LLM_LATENCY__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_LLM_LATENCY__DESCRIPTION",
  defaultValue: false
  },
  {
@@ -592,6 +599,7 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
  "debugDescription",
  "debugLogTokenCount",
  "debugLogRequestAndCompletion",
+ "debugLogLLMLatency",
  "debugLogToolDefinitions"
  ]
  },
@@ -630,9 +638,9 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
  },
  tags: ["ai", "llm", "gpt", "generative ai", "openai", "azure", "prompt"],
  function: async ({ cognigy, config, childConfigs, nodeId }) => {
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v;
  const { api, input, flowReferenceId } = cognigy;
- const { temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, streamStopTokens, streamStopTokenOverrides, debugLogTokenCount, debugLogRequestAndCompletion, debugLogToolDefinitions, llmProviderReferenceId, usePromptMode, chatTranscriptSteps, responseFormat, streamStoreCopyInInput, seed, immediateOutput, customModelOptions, customRequestOptions, errorHandling = "continue", // default behavior for LLM Prompt node was, continue its execution even though an error occurred (deviating it from the SEO node) & do not output an error message on UI explicitly. However, error is always stored in the input or context object. We can use an extra "say" node to output it.
+ const { temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, streamStopTokens, streamStopTokenOverrides, debugLogTokenCount, debugLogRequestAndCompletion, debugLogLLMLatency, debugLogToolDefinitions, llmProviderReferenceId, usePromptMode, chatTranscriptSteps, responseFormat, streamStoreCopyInInput, seed, immediateOutput, customModelOptions, customRequestOptions, errorHandling = "continue", // default behavior for LLM Prompt node was, continue its execution even though an error occurred (deviating it from the SEO node) & do not output an error message on UI explicitly. However, error is always stored in the input or context object. We can use an extra "say" node to output it.
  errorHandlingGotoTarget, errorMessage, logErrorToSystem, processImages, transcriptImageHandling, toolChoice, useStrict } = config;
  let prompt = config.prompt || "";
  const { traceId } = input;
@@ -721,6 +729,8 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
  try {
  const isStreamingChannel = input.channel === "webchat3" || input.channel === "adminconsole";
  const _messageId = (0, crypto_1.randomUUID)();
+ // Start measuring LLM latency and time to first output if debug flag is enabled
+ let firstOutputTime = null;
  /**
  * Retrieve the tool definitions from the child nodes
  */
@@ -737,6 +747,10 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
  var _a;
  text = isStreamingChannel ? text : text.trim();
  if (text) {
+ // Record first output time for debugging if not already recorded
+ if (debugLogLLMLatency && firstOutputTime === null) {
+ firstOutputTime = Date.now();
+ }
  // if we got text, we output it, but prevent it from being added to the transcript
  (_a = api.output) === null || _a === void 0 ? void 0 : _a.call(api, text, {
  _cognigy: {
@@ -780,15 +794,38 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
  content: prompt
  }];
  }
+ const llmStartTime = debugLogLLMLatency ? Date.now() : 0;
  // Run the LLM Query
  const fullLlmResult = await api.runGenerativeAIPrompt(llmPromptOptions, "gptPromptNode");
+ // End measuring times and log if debug flag is enabled
+ if (debugLogLLMLatency) {
+ const llmEndTime = Date.now();
+ const debugMessages = [];
+ const llmLatencyMs = llmEndTime - llmStartTime;
+ let timeToFirstOutputLabel;
+ if (fullLlmResult.finishReason === "tool_calls" && fullLlmResult.toolCalls.length > 0) {
+ timeToFirstOutputLabel = " - (tool call)";
+ }
+ else if (firstOutputTime === null) {
+ timeToFirstOutputLabel = " - (no output)";
+ }
+ else {
+ firstOutputTime = firstOutputTime || llmEndTime;
+ timeToFirstOutputLabel = `${firstOutputTime - llmStartTime}ms`;
+ }
+ if (storeLocation === "stream") {
+ debugMessages.push(`UI__DEBUG_MODE__AI_AGENT_JOB__TIME_TO_FIRST_OUTPUT__LABEL: ${timeToFirstOutputLabel}`);
+ }
+ debugMessages.push(`UI__DEBUG_MODE__AI_AGENT_JOB__LLM_LATENCY__LABEL: ${llmLatencyMs}ms`);
+ (_c = api.logDebugMessage) === null || _c === void 0 ? void 0 : _c.call(api, debugMessages.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TIMING__HEADER");
+ }
  const { messages } = fullLlmResult, llmResult = __rest(fullLlmResult, ["messages"]);
  const isFollowSessionActive = api.getMetadata().isFollowSessionActive;
  if (debugLogToolDefinitions) {
- (_c = api.logDebugMessage) === null || _c === void 0 ? void 0 : _c.call(api, tools, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_DEFINITIONS");
+ (_d = api.logDebugMessage) === null || _d === void 0 ? void 0 : _d.call(api, tools, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_DEFINITIONS");
  }
  // if we're in adminconsole or following a session, process debugging options
- (input.endpointType === "adminconsole" || isFollowSessionActive) && (0, prompt_1.writeLLMDebugLogs)("LLM Prompt", debugPrompt, llmResult, debugLogTokenCount, debugLogRequestAndCompletion, cognigy);
+ (input.endpointType === "adminconsole" || isFollowSessionActive) && (0, prompt_1.writeLLMDebugLogs)("LLM Prompt", debugPrompt, llmResult, debugLogTokenCount, debugLogRequestAndCompletion, cognigy, "llmPromptV2");
  if (llmResult.finishReason === "tool_calls" && llmResult.toolCalls.length > 0) {
  const mainToolCall = llmResult.toolCalls[0];
  let isMcpToolCall = false;
@@ -800,7 +837,7 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
  isMcpToolCall = true;
  }
  if (mainToolCall.function.name !== "retrieve_knowledge" && toolChild === undefined) {
- (_d = api.logDebugError) === null || _d === void 0 ? void 0 : _d.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
+ (_e = api.logDebugError) === null || _e === void 0 ? void 0 : _e.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
  }
  // Add last tool call to session state for loading it from Tool Answer Node
  api.updateSessionStateValues({
@@ -808,20 +845,20 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
  flow: flowReferenceId,
  node: nodeId,
  } }, (isMcpToolCall && {
- mcpServerUrl: (_e = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _e === void 0 ? void 0 : _e.mcpServerUrl,
- timeout: (_f = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _f === void 0 ? void 0 : _f.timeout,
+ mcpServerUrl: (_f = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _f === void 0 ? void 0 : _f.mcpServerUrl,
+ timeout: (_g = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _g === void 0 ? void 0 : _g.timeout,
  mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
  })), { toolCall: mainToolCall }),
  });
  // if there are any parameters/arguments, add them to the input slots
  if (mainToolCall.function.arguments) {
- input.llmPrompt = Object.assign(Object.assign({}, input.llmPrompt), { toolArgs: Object.assign(Object.assign({}, (_h = (_g = input.llmPrompt) === null || _g === void 0 ? void 0 : _g.toolArgs) !== null && _h !== void 0 ? _h : {}), mainToolCall.function.arguments) });
+ input.llmPrompt = Object.assign(Object.assign({}, input.llmPrompt), { toolArgs: Object.assign(Object.assign({}, (_j = (_h = input.llmPrompt) === null || _h === void 0 ? void 0 : _h.toolArgs) !== null && _j !== void 0 ? _j : {}), mainToolCall.function.arguments) });
  }
  // Debug Message for Tool Calls, configured in the Tool Node
- if ((_j = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _j === void 0 ? void 0 : _j.debugMessage) {
+ if ((_k = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _k === void 0 ? void 0 : _k.debugMessage) {
  const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${api.parseCognigyScriptText(toolChild.config.toolId)}`];
  // Arguments / Parameters Slots
- const slots = ((_k = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _k === void 0 ? void 0 : _k.arguments) && Object.keys(mainToolCall.function.arguments);
+ const slots = ((_l = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _l === void 0 ? void 0 : _l.arguments) && Object.keys(mainToolCall.function.arguments);
  const hasSlots = slots && slots.length > 0;
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
  if (hasSlots) {
@@ -836,7 +873,7 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
  messageLines.push(`- ${slot}: ${slotValueAsString}`);
  });
  }
- (_l = api.logDebugMessage) === null || _l === void 0 ? void 0 : _l.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
+ (_m = api.logDebugMessage) === null || _m === void 0 ? void 0 : _m.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
  }
  if (toolChild) {
  api.setNextNode(toolChild.id);
@@ -861,11 +898,11 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
  // we stringify objects (e.g. results coming from JSON Mode)
  // so that the transcript only contains text
  const resultToOutput = typeof ((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult) === "object" ? JSON.stringify((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult, undefined, 2) : (llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult;
- await ((_m = api.output) === null || _m === void 0 ? void 0 : _m.call(api, resultToOutput, {}));
+ await ((_o = api.output) === null || _o === void 0 ? void 0 : _o.call(api, resultToOutput, {}));
  }
  else if (llmResult.finishReason && llmPromptOptions.stream) {
  // send the finishReason as last output for a stream
- (_o = api.output) === null || _o === void 0 ? void 0 : _o.call(api, "", {
+ (_p = api.output) === null || _p === void 0 ? void 0 : _p.call(api, "", {
  _cognigy: {
  _preventTranscript: true,
  _messageId,
@@ -888,7 +925,7 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
  }
  // Add response to Cognigy Input/Context for further usage
  if (storeLocation === "context") {
- (_p = api.addToContext) === null || _p === void 0 ? void 0 : _p.call(api, contextKey, llmResult, "simple");
+ (_q = api.addToContext) === null || _q === void 0 ? void 0 : _q.call(api, contextKey, llmResult, "simple");
  }
  else if (storeLocation === "input") {
  api.addToInput(inputKey, llmResult);
@@ -901,19 +938,19 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
  const errorDetailsBase = {
  name: error === null || error === void 0 ? void 0 : error.name,
  code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
- message: (error === null || error === void 0 ? void 0 : error.message) || ((_q = error.originalErrorDetails) === null || _q === void 0 ? void 0 : _q.message),
+ message: (error === null || error === void 0 ? void 0 : error.message) || ((_r = error.originalErrorDetails) === null || _r === void 0 ? void 0 : _r.message),
  };
  const errorDetails = Object.assign(Object.assign({}, errorDetailsBase), { originalErrorDetails: error === null || error === void 0 ? void 0 : error.originalErrorDetails });
  // return the requestId if it exist in the error obj.
- if ((_r = error.meta) === null || _r === void 0 ? void 0 : _r.requestId) {
+ if ((_s = error.meta) === null || _s === void 0 ? void 0 : _s.requestId) {
  errorDetails["meta"] = {
- requestId: (_s = error.meta) === null || _s === void 0 ? void 0 : _s.requestId
+ requestId: (_t = error.meta) === null || _t === void 0 ? void 0 : _t.requestId
  };
  }
  if (logErrorToSystem) {
- (_t = api.log) === null || _t === void 0 ? void 0 : _t.call(api, "error", JSON.stringify(errorDetailsBase));
+ (_u = api.log) === null || _u === void 0 ? void 0 : _u.call(api, "error", JSON.stringify(errorDetailsBase));
  }
- (_u = api.logDebugError) === null || _u === void 0 ? void 0 : _u.call(api, errorDetailsBase, "UI__DEBUG_MODE__LLM_PROMPT__ERROR");
+ (_v = api.logDebugError) === null || _v === void 0 ? void 0 : _v.call(api, errorDetailsBase, "UI__DEBUG_MODE__LLM_PROMPT__ERROR");
  await handleServiceError(errorDetails);
  return;
  }
@@ -153,7 +153,9 @@ class BaseContext {
  return (c) ? c : undefined;
  }
  else { // there is none
- return (this.context[key] || this.context[key] === 0) ? this.context[key] : undefined;
+ return this.context[key] !== null && this.context[key] !== undefined
+ ? this.context[key]
+ : undefined;
  }
  }
  /**
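
Note: the context lookup previously relied on a truthiness check with a special case for 0, so stored values such as false or an empty string came back as undefined; the new check only filters out null and undefined. A minimal sketch of the changed lookup behavior (not the full BaseContext class):

    // Only null/undefined fall through to undefined; other falsy values are returned as stored
    function getContextValue(context, key) {
        return context[key] !== null && context[key] !== undefined
            ? context[key]
            : undefined;
    }
    getContextValue({ flag: false }, "flag"); // false (previously undefined)
    getContextValue({ note: "" }, "note");    // ""    (previously undefined)
    getContextValue({ n: 0 }, "n");           // 0     (unchanged)
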
@@ -36,6 +36,9 @@ exports.createHandoverRequestDataSchema = {
  "type": "string",
  "format": "mongo-id",
  },
+ "locale": {
+ "type": "string",
+ },
  "userId": {
  "type": "string"
  },
@@ -198,6 +201,9 @@ exports.sendMessageToProviderSchema = {
  "type": "string",
  "format": "mongo-id",
  },
+ "locale": {
+ "type": "string",
+ },
  "userId": {
  "type": "string"
  },
@@ -13,7 +13,7 @@ exports.aiAgentDataSchema = {
  name: { type: "string", format: "resource-name" },
  image: { type: "string" },
  imageOptimizedFormat: { type: "boolean" },
- knowledgeReferenceId: { type: "string", format: "uuid" },
+ knowledgeReferenceId: { type: ["string", "null"], format: "uuid" },
  description: { type: "string", maxLength: 1000 },
  speakingStyle: {
  type: "object",