@cognigy/rest-api-client 2025.13.0 → 2025.14.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/CHANGELOG.md +5 -0
  2. package/build/apigroups/ResourcesAPIGroup_2_0.js +4 -1
  3. package/build/apigroups/SimulationAPIGroup_2_0.js +4 -1
  4. package/build/shared/charts/descriptors/connectionNodes/internalStorageProviders/amazonStorageProviderConnection.js +7 -1
  5. package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +6 -4
  6. package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +11 -3
  7. package/build/shared/charts/descriptors/service/GPTPrompt.js +6 -0
  8. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +58 -21
  9. package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +58 -21
  10. package/build/shared/interfaces/messageAPI/handover.js +6 -0
  11. package/build/shared/interfaces/resources/ISimulation.js +9 -0
  12. package/build/shared/interfaces/resources/TResourceType.js +3 -0
  13. package/build/shared/interfaces/restAPI/resources/chart/v2.0/IReadFlowChartAiAgentsRest_2_0.js +3 -0
  14. package/build/shared/interfaces/restAPI/simulation/persona/IGeneratePersonaRest_2_0.js +3 -0
  15. package/build/shared/interfaces/restAPI/simulation/persona/IGetPersonaOptionsRest_2_0.js +3 -0
  16. package/build/shared/interfaces/restAPI/simulation/persona/IRegeneratePersonaFieldRest_2_0.js +3 -0
  17. package/build/shared/interfaces/security/IPermission.js +2 -0
  18. package/build/shared/interfaces/security/IRole.js +2 -0
  19. package/build/shared/interfaces/security/index.js +1 -1
  20. package/dist/esm/apigroups/ResourcesAPIGroup_2_0.js +4 -1
  21. package/dist/esm/apigroups/SimulationAPIGroup_2_0.js +4 -1
  22. package/dist/esm/shared/charts/descriptors/connectionNodes/internalStorageProviders/amazonStorageProviderConnection.js +7 -1
  23. package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +6 -4
  24. package/dist/esm/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +11 -3
  25. package/dist/esm/shared/charts/descriptors/service/GPTPrompt.js +6 -0
  26. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +58 -21
  27. package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +65 -28
  28. package/dist/esm/shared/interfaces/messageAPI/handover.js +6 -0
  29. package/dist/esm/shared/interfaces/resources/ISimulation.js +6 -0
  30. package/dist/esm/shared/interfaces/resources/TResourceType.js +3 -0
  31. package/dist/esm/shared/interfaces/restAPI/resources/chart/v2.0/IReadFlowChartAiAgentsRest_2_0.js +2 -0
  32. package/dist/esm/shared/interfaces/restAPI/simulation/persona/IGeneratePersonaRest_2_0.js +2 -0
  33. package/dist/esm/shared/interfaces/restAPI/simulation/persona/IGetPersonaOptionsRest_2_0.js +2 -0
  34. package/dist/esm/shared/interfaces/restAPI/simulation/persona/IRegeneratePersonaFieldRest_2_0.js +2 -0
  35. package/dist/esm/shared/interfaces/security/IPermission.js +2 -0
  36. package/dist/esm/shared/interfaces/security/IRole.js +2 -0
  37. package/dist/esm/shared/interfaces/security/index.js +1 -1
  38. package/package.json +1 -1
  39. package/types/index.d.ts +349 -224
@@ -23,6 +23,7 @@ exports.projectRolesWithSpecialOrgRights = exports.availableRoles = exports.proj
  * - userManager
  * - userDetailsViewer
  * - voiceGatewayUser
+ * - opsCenterUser
  */
  exports.organisationWideRoles = [
  "admin",
@@ -40,6 +41,7 @@ exports.organisationWideRoles = [
  "userManager",
  "userDetailsViewer",
  "voiceGatewayUser",
+ "opsCenterUser",
  ];
  /**
  * @openapi
@@ -1,7 +1,7 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.identityProviderSchema = exports.oidcIdentityProviderDataSchema = exports.samlIdentityProviderDataSchema = exports.operations = exports.availableRoles = exports.projectWideRoles = exports.organisationWideRoles = exports.availablePermissions = exports.COMPUTED_ACL_HASH_VERSION = void 0;
- exports.COMPUTED_ACL_HASH_VERSION = "v20";
+ exports.COMPUTED_ACL_HASH_VERSION = "v21";
  var IPermission_1 = require("./IPermission");
  Object.defineProperty(exports, "availablePermissions", { enumerable: true, get: function () { return IPermission_1.availablePermissions; } });
  var IRole_1 = require("./IRole");
@@ -619,7 +619,10 @@ export const ResourcesAPIGroup_2_0 = (instance) => {
  generateDesignTimeLLMOutput: (_a) => {
  var { projectId } = _a, args = __rest(_a, ["projectId"]);
  return GenericAPIFn(`/new/v2.0/projects/${projectId}/generate-output/design-time-llm`, "POST", self)(args);
- }
+ },
+ readFlowChartAiAgents: ({ flowId, preferredLocaleId }, options) => GenericAPIFn(`/new/v2.0/flows/${flowId}/chart/nodes/output/aiagents?${stringifyQuery({
+ preferredLocaleId
+ })}`, "GET", self)(undefined, options)
  };
  };
  //# sourceMappingURL=ResourcesAPIGroup_2_0.js.map
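
The new readFlowChartAiAgents call issues a GET against /new/v2.0/flows/{flowId}/chart/nodes/output/aiagents with preferredLocaleId passed as a query parameter. A minimal usage sketch, inside an async function; "resourcesApi" stands in for whatever object exposes the ResourcesAPIGroup_2_0 methods on an already-configured client, and the IDs are placeholders:

// Hedged sketch, not verbatim package usage.
const aiAgents = await resourcesApi.readFlowChartAiAgents({
    flowId: "<flowId>",
    preferredLocaleId: "<preferredLocaleId>",
});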
@@ -38,7 +38,10 @@ export function SimulationAPIGroup_2_0(instance) {
  readSimulationRun: (_a, options) => {
  var { simulationReference, simulationRunBatchReference, simulationRunReference } = _a, args = __rest(_a, ["simulationReference", "simulationRunBatchReference", "simulationRunReference"]);
  return GenericAPIFn(`/testing/beta/simulations/${simulationReference}/batches/${simulationRunBatchReference}/runs/${simulationRunReference}?${stringifyQuery(args)}`, "GET", self)(undefined, options);
- }
+ },
+ getPersonaOptions: (args, options) => GenericAPIFn("/testing/beta/personas/options", "POST", self)(args, options),
+ generatePersona: (args, options) => GenericAPIFn("/testing/beta/personas/generate", "POST", self)(args, options),
+ regeneratePersonaField: (args, options) => GenericAPIFn("/testing/beta/personas/regenerate-field", "POST", self)(args, options)
  };
  }
  //# sourceMappingURL=SimulationAPIGroup_2_0.js.map
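
Three persona endpoints are added to the simulation testing group, all issued as POST requests under /testing/beta/personas. A hedged sketch of how they would be called, inside an async function; "simulationApi" is a stand-in for the object exposing SimulationAPIGroup_2_0, and the payloads are placeholders because the request shapes live in the new persona REST interfaces, which this diff only lists by filename:

const personaOptions = await simulationApi.getPersonaOptions({ /* body per IGetPersonaOptionsRest_2_0 */ });
const persona = await simulationApi.generatePersona({ /* body per IGeneratePersonaRest_2_0 */ });
const regenerated = await simulationApi.regeneratePersonaField({ /* body per IRegeneratePersonaFieldRest_2_0 */ });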
@@ -6,6 +6,12 @@ export const AMAZON_STORAGE_PROVIDER_CONNECTION = {
  { fieldName: "secretAccessKey", label: "UI__CONNECTION_EDITOR__FIELD_SECRET_ACCESS_KEY" },
  { fieldName: "region", label: "UI__CONNECTION_EDITOR__FIELD_REGION" },
  { fieldName: "bucketName", label: "UI__CONNECTION_EDITOR__FIELD_BUCKET_NAME" },
- ]
+ {
+ fieldName: "customUrl",
+ label: "UI__CONNECTION_EDITOR__FIELD_CUSTOM_URL",
+ required: false,
+ description: "UI__CONNECTION_EDITOR__FIELD_CUSTOM_URL_AWS__DESCRIPTION"
+ },
+ ],
  };
  //# sourceMappingURL=amazonStorageProviderConnection.js.map
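
The Amazon storage provider connection gains an optional customUrl field alongside the existing ones. A sketch of connection data that would satisfy the updated descriptor; the values are placeholders, the access-key field is omitted because this hunk only shows the trailing fields, and pointing customUrl at an S3-compatible endpoint is an assumption based on the field name:

const amazonStorageConnection = {
    secretAccessKey: "<secretAccessKey>",
    region: "<region>",
    bucketName: "<bucketName>",
    customUrl: "https://s3.internal.example.com", // new, optional
};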
@@ -639,10 +639,12 @@ New: `;
  message: (error === null || error === void 0 ? void 0 : error.message) || error,
  };
  api.logDebugError(JSON.stringify(compactError, undefined, 2), "Search Extract Output: Error");
- api.emitToOpsCenter({
- subComponent: "KnowledgeAIQueries",
- title: error === null || error === void 0 ? void 0 : error.message
- });
+ if (!(error instanceof InternalServerError)) {
+ api.emitToOpsCenter({
+ subComponent: "KnowledgeAIQueries",
+ title: error === null || error === void 0 ? void 0 : error.message
+ });
+ }
  if ((_m = error === null || error === void 0 ? void 0 : error.originalErrorDetails) === null || _m === void 0 ? void 0 : _m.code) {
  compactError["code"] = error.originalErrorDetails.code;
  }
@@ -125,8 +125,9 @@ export const createLastUserInputString = (lastConversationEntries, turnLimit = 1
  * @param debugLogTokenCount whether to log the token count
  * @param debugLogRequestAndCompletion whether to log the request and completion
  * @param cognigy the cognigy object (input, api, etc)
+ * @param nodeType the type of the node (optional)
  */
- export const writeLLMDebugLogs = (label, prompt, response, debugLogTokenCount, debugLogRequestAndCompletion, cognigy) => __awaiter(void 0, void 0, void 0, function* () {
+ export const writeLLMDebugLogs = (label, prompt, response, debugLogTokenCount, debugLogRequestAndCompletion, cognigy, nodeType) => __awaiter(void 0, void 0, void 0, function* () {
  var _a, _b, _c, _d;
  const { api, input } = cognigy;
  if (input.endpointType !== "adminconsole" && !api.getMetadata().isFollowSessionActive) {
@@ -143,14 +144,21 @@ export const writeLLMDebugLogs = (label, prompt, response, debugLogTokenCount, d
  if (debugLogTokenCount) {
  if (prompt) {
  const requestTokens = (_a = response === null || response === void 0 ? void 0 : response.tokenUsage) === null || _a === void 0 ? void 0 : _a.inputTokens;
- requestTokenMessage = ` (${requestTokens} Tokens)`;
+ requestTokenMessage = ` (${nodeType === "llmPromptV2" ? "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__FULL_REQUEST: " : ""}${requestTokens} Tokens)`;
  }
  if (response) {
  const completionTokens = (_b = response === null || response === void 0 ? void 0 : response.tokenUsage) === null || _b === void 0 ? void 0 : _b.outputTokens;
  completionTokenMessage = ` (${completionTokens} Tokens)`;
  }
  }
- api.logDebugMessage(`UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__REQUEST${requestTokenMessage}:<br>${prompt}<br><br>UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__COMPLETION${completionTokenMessage}:<br>${responseOutputFormatted}`, "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER");
+ let inputLabelKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__REQUEST";
+ let headerKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER";
+ if (nodeType === "llmPromptV2") {
+ inputLabelKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__SYSTEM_PROMPT";
+ headerKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER_WITH_SYSTEM_PROMPT";
+ }
+ ;
+ api.logDebugMessage(`${inputLabelKey}${requestTokenMessage}:<br>${prompt}<br><br>UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__COMPLETION${completionTokenMessage}:<br>${responseOutputFormatted}`, headerKey);
  }
  catch (err) { }
  }
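
writeLLMDebugLogs now takes an optional nodeType argument; when "llmPromptV2" is passed, the debug output uses the SYSTEM_PROMPT label and the HEADER_WITH_SYSTEM_PROMPT header instead of the generic REQUEST keys. A sketch of the two call forms, with the other arguments as in the existing signature:

// Existing behaviour: generic REQUEST/HEADER label keys.
await writeLLMDebugLogs("LLM Prompt", prompt, response, true, true, cognigy);

// New: the LLM Prompt v2 node identifies itself, switching the label keys.
await writeLLMDebugLogs("LLM Prompt", prompt, response, true, true, cognigy, "llmPromptV2");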
@@ -585,6 +585,12 @@ export const GPT_PROMPT = createNodeDescriptor({
  const errorResponse = {
  error: compactError,
  };
+ if (!(error instanceof InternalServerError)) {
+ api.emitToOpsCenter({
+ subComponent: "LargeLanguageModelCalls",
+ title: error === null || error === void 0 ? void 0 : error.message
+ });
+ }
  // add error to context or input
  switch (storeLocation) {
  case "context":
@@ -499,6 +499,13 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_CONFIG__DESCRIPTION",
  defaultValue: true
  },
+ {
+ key: "debugLogLLMLatency",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_LLM_LATENCY__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_LLM_LATENCY__DESCRIPTION",
+ defaultValue: false
+ },
  {
  key: "storeLocation",
  type: "select",
@@ -818,7 +825,8 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  "debugResult",
  "debugLogTokenCount",
  "debugLogSystemPrompt",
- "debugLogToolDefinitions"
+ "debugLogToolDefinitions",
+ "debugLogLLMLatency"
  ],
  }
  ],
@@ -839,9 +847,9 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  ],
  tags: ["ai", "aiAgent"],
  function: ({ cognigy, config, childConfigs, nodeId }) => __awaiter(void 0, void 0, void 0, function* () {
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21;
  const { api, context, input, profile, flowReferenceId } = cognigy;
- const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
+ const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, debugLogLLMLatency, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
  try {
  if (!aiAgent) {
  throw new Error("Could not resolve AI Agent reference in AI Agent Node");
@@ -889,7 +897,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  throw new Error(`[VG2] Error on AI Agent Job node. Error message: ${error.message}`);
  }
  }
- const _21 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _21, cleanedProfile = __rest(_21, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
+ const _22 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _22, cleanedProfile = __rest(_22, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
  const userMemory = getUserMemory(memoryType, selectedProfileFields, aiAgent, cleanedProfile);
  /**
  * ----- Knowledge Search Section -----
@@ -1139,6 +1147,10 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  var _a;
  text = isStreamingChannel ? text : text.trim();
  if (text) {
+ // Record first output time for debugging if not already recorded
+ if (debugLogLLMLatency && firstOutputTime === null) {
+ firstOutputTime = Date.now();
+ }
  // if we got text, we output it, but prevent it from being added to the transcript
  (_a = api.output) === null || _a === void 0 ? void 0 : _a.call(api, text, {
  _cognigy: {
@@ -1161,13 +1173,38 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  // Set understood to true so that an AI Agent interaction doesn't look false in our analytics
  (_1 = api.setAnalyticsData) === null || _1 === void 0 ? void 0 : _1.call(api, "understood", "true");
  input.understood = true;
+ // Start measuring LLM latency and time to first output if debug flag is enabled
+ const llmStartTime = debugLogLLMLatency ? Date.now() : 0;
+ let firstOutputTime = null;
  const fullLlmResult = yield ((_2 = api.runGenerativeAIPrompt) === null || _2 === void 0 ? void 0 : _2.call(api, llmPromptOptions, "aiAgent"));
+ // End measuring times and log if debug flag is enabled
+ if (debugLogLLMLatency) {
+ const llmEndTime = Date.now();
+ const debugMessages = [];
+ const llmLatencyMs = llmEndTime - llmStartTime;
+ let timeToFirstOutputLabel;
+ if (fullLlmResult.finishReason === "tool_calls" && fullLlmResult.toolCalls.length > 0) {
+ timeToFirstOutputLabel = " - (tool call)";
+ }
+ else if (firstOutputTime === null) {
+ timeToFirstOutputLabel = " - (no output)";
+ }
+ else {
+ firstOutputTime = firstOutputTime || llmEndTime;
+ timeToFirstOutputLabel = `${firstOutputTime - llmStartTime}ms`;
+ }
+ if (storeLocation === "stream") {
+ debugMessages.push(`UI__DEBUG_MODE__AI_AGENT_JOB__TIME_TO_FIRST_OUTPUT__LABEL: ${timeToFirstOutputLabel}`);
+ }
+ debugMessages.push(`UI__DEBUG_MODE__AI_AGENT_JOB__LLM_LATENCY__LABEL: ${llmLatencyMs}ms`);
+ (_3 = api.logDebugMessage) === null || _3 === void 0 ? void 0 : _3.call(api, debugMessages.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TIMING__HEADER");
+ }
  const { messages } = fullLlmResult, llmResult = __rest(fullLlmResult, ["messages"]);
  const llmProvider = llmResult === null || llmResult === void 0 ? void 0 : llmResult.provider;
  const tokenUsage = fullLlmResult.tokenUsage;
  // Send optional debug message with token usage
  if (debugLogTokenCount && tokenUsage) {
- (_3 = api.logDebugMessage) === null || _3 === void 0 ? void 0 : _3.call(api, tokenUsage, "UI__DEBUG_MODE__AI_AGENT_JOB__TOKEN_USAGE__HEADER");
+ (_4 = api.logDebugMessage) === null || _4 === void 0 ? void 0 : _4.call(api, tokenUsage, "UI__DEBUG_MODE__AI_AGENT_JOB__TOKEN_USAGE__HEADER");
  }
  // Identify if the result is a tool call
  // If response is a tool call, set next node for Tools
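
The latency logging added here reduces to three timestamps: right before api.runGenerativeAIPrompt is called, the first streamed output (recorded in the output handler added earlier in this file), and the moment the call resolves. A simplified, standalone sketch of the same pattern, with callLLM standing in for the streaming prompt call:

// Self-contained sketch of the debugLogLLMLatency timing pattern.
async function callLLM(onText) {
    onText("partial chunk"); // pretend the model streamed one chunk
    return { result: "full completion" };
}

async function measureLlmTiming() {
    const llmStartTime = Date.now();
    let firstOutputTime = null;
    await callLLM(() => {
        // record only the first streamed chunk
        if (firstOutputTime === null) {
            firstOutputTime = Date.now();
        }
    });
    const llmEndTime = Date.now();
    const timeToFirstOutput = firstOutputTime === null
        ? "(no output)"
        : `${firstOutputTime - llmStartTime}ms`;
    console.log(`LLM latency: ${llmEndTime - llmStartTime}ms, time to first output: ${timeToFirstOutput}`);
}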
@@ -1182,7 +1219,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  isMcpToolCall = true;
  }
  if (mainToolCall.function.name !== "retrieve_knowledge" && toolChild === undefined) {
- (_4 = api.logDebugError) === null || _4 === void 0 ? void 0 : _4.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
+ (_5 = api.logDebugError) === null || _5 === void 0 ? void 0 : _5.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
  }
  // Add last tool call to session state for loading it from Tool Answer Node
  api.updateSessionStateValues({
@@ -1190,21 +1227,21 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  flow: flowReferenceId,
  node: nodeId,
  } }, (isMcpToolCall && {
- mcpServerUrl: (_5 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _5 === void 0 ? void 0 : _5.mcpServerUrl,
- timeout: (_6 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _6 === void 0 ? void 0 : _6.timeout,
+ mcpServerUrl: (_6 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _6 === void 0 ? void 0 : _6.mcpServerUrl,
+ timeout: (_7 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _7 === void 0 ? void 0 : _7.timeout,
  mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
  })), { toolCall: mainToolCall }),
  });
  // if there are any parameters/arguments, add them to the input slots
  if (mainToolCall.function.arguments) {
- input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_8 = (_7 = input.aiAgent) === null || _7 === void 0 ? void 0 : _7.toolArgs) !== null && _8 !== void 0 ? _8 : {}), mainToolCall.function.arguments) });
+ input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_9 = (_8 = input.aiAgent) === null || _8 === void 0 ? void 0 : _8.toolArgs) !== null && _9 !== void 0 ? _9 : {}), mainToolCall.function.arguments) });
  }
  // Debug Message for Tool Calls, configured in the Tool Node
- if ((_9 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _9 === void 0 ? void 0 : _9.debugMessage) {
+ if ((_10 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _10 === void 0 ? void 0 : _10.debugMessage) {
  const toolId = isMcpToolCall ? mainToolCall.function.name : api.parseCognigyScriptText(toolChild.config.toolId);
  const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${toolId}`];
  // Arguments / Parameters Slots
- const slots = ((_10 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _10 === void 0 ? void 0 : _10.arguments) && Object.keys(mainToolCall.function.arguments);
+ const slots = ((_11 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _11 === void 0 ? void 0 : _11.arguments) && Object.keys(mainToolCall.function.arguments);
  const hasSlots = slots && slots.length > 0;
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
  if (hasSlots) {
@@ -1219,7 +1256,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  messageLines.push(`- ${slot}: ${slotValueAsString}`);
  });
  }
- (_11 = api.logDebugMessage) === null || _11 === void 0 ? void 0 : _11.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
+ (_12 = api.logDebugMessage) === null || _12 === void 0 ? void 0 : _12.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
  }
  if (toolChild) {
  api.setNextNode(toolChild.id);
@@ -1244,11 +1281,11 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  }
  // Optionally output the result immediately
  if (llmResult.result && outputImmediately && !llmPromptOptions.stream) {
- yield ((_12 = api.output) === null || _12 === void 0 ? void 0 : _12.call(api, llmResult.result, {}));
+ yield ((_13 = api.output) === null || _13 === void 0 ? void 0 : _13.call(api, llmResult.result, {}));
  }
  else if (llmResult.finishReason && llmPromptOptions.stream) {
  // send the finishReason as last output for a stream
- (_13 = api.output) === null || _13 === void 0 ? void 0 : _13.call(api, "", {
+ (_14 = api.output) === null || _14 === void 0 ? void 0 : _14.call(api, "", {
  _cognigy: {
  _preventTranscript: true,
  _messageId,
@@ -1271,7 +1308,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  }
  // Add response to Cognigy Input/Context for further usage
  if (storeLocation === "context") {
- (_14 = api.addToContext) === null || _14 === void 0 ? void 0 : _14.call(api, contextKey, llmResult, "simple");
+ (_15 = api.addToContext) === null || _15 === void 0 ? void 0 : _15.call(api, contextKey, llmResult, "simple");
  }
  else if (storeLocation === "input") {
  api.addToInput(inputKey, llmResult);
@@ -1284,14 +1321,14 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  const errorDetails = {
  name: (error === null || error === void 0 ? void 0 : error.name) || "Error",
  code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
- message: (error === null || error === void 0 ? void 0 : error.message) || ((_15 = error.originalErrorDetails) === null || _15 === void 0 ? void 0 : _15.message),
+ message: (error === null || error === void 0 ? void 0 : error.message) || ((_16 = error.originalErrorDetails) === null || _16 === void 0 ? void 0 : _16.message),
  };
- (_16 = api.emitEvent) === null || _16 === void 0 ? void 0 : _16.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
+ (_17 = api.emitEvent) === null || _17 === void 0 ? void 0 : _17.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
  if (logErrorToSystem) {
- (_17 = api.log) === null || _17 === void 0 ? void 0 : _17.call(api, "error", JSON.stringify(errorDetails));
+ (_18 = api.log) === null || _18 === void 0 ? void 0 : _18.call(api, "error", JSON.stringify(errorDetails));
  }
  if (errorHandling !== "stop") {
- (_18 = api.logDebugError) === null || _18 === void 0 ? void 0 : _18.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
+ (_19 = api.logDebugError) === null || _19 === void 0 ? void 0 : _19.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
  }
  if (storeErrorInInput) {
  input.aiAgent = input.aiAgent || {};
@@ -1300,7 +1337,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  if (errorHandling === "continue") {
  // output the timeout message
  if (errorMessage) {
- yield ((_19 = api.output) === null || _19 === void 0 ? void 0 : _19.call(api, errorMessage, null));
+ yield ((_20 = api.output) === null || _20 === void 0 ? void 0 : _20.call(api, errorMessage, null));
  }
  // Set default node as next node
  const defaultChild = childConfigs.find(child => child.type === "aiAgentJobDefault");
@@ -1312,7 +1349,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  if (!errorHandlingGotoTarget) {
  throw new Error("GoTo Target is required");
  }
- if (!((_20 = api.checkThink) === null || _20 === void 0 ? void 0 : _20.call(api, nodeId))) {
+ if (!((_21 = api.checkThink) === null || _21 === void 0 ? void 0 : _21.call(api, nodeId))) {
  api.resetNextNodes();
  yield api.executeFlow({
  flowNode: {
@@ -318,9 +318,16 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  },
  {
  key: "debugLogRequestAndCompletion",
- label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUGLOGREQUESTANDCOMPLETION__LABEL",
+ label: "UI__NODE_EDITOR__SERVICE__LLM_PROMPT__FIELDS__LOG_SYSTEM_PROMPT_AND_COMPLETION__LABEL",
  type: "toggle",
- description: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__DEBUGLOGREQUESTANDCOMPLETION__DESCRIPTION",
+ description: "UI__NODE_EDITOR__SERVICE__LLM_PROMPT__FIELDS__LOG_SYSTEM_PROMPT_AND_COMPLETION__DESCRIPTION",
+ defaultValue: false
+ },
+ {
+ key: "debugLogLLMLatency",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_LLM_LATENCY__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_LLM_LATENCY__DESCRIPTION",
  defaultValue: false
  },
  {
@@ -579,6 +586,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  "debugDescription",
  "debugLogTokenCount",
  "debugLogRequestAndCompletion",
+ "debugLogLLMLatency",
  "debugLogToolDefinitions"
  ]
  },
@@ -617,9 +625,9 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  },
  tags: ["ai", "llm", "gpt", "generative ai", "openai", "azure", "prompt"],
  function: ({ cognigy, config, childConfigs, nodeId }) => __awaiter(void 0, void 0, void 0, function* () {
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v;
  const { api, input, flowReferenceId } = cognigy;
- const { temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, streamStopTokens, streamStopTokenOverrides, debugLogTokenCount, debugLogRequestAndCompletion, debugLogToolDefinitions, llmProviderReferenceId, usePromptMode, chatTranscriptSteps, responseFormat, streamStoreCopyInInput, seed, immediateOutput, customModelOptions, customRequestOptions, errorHandling = "continue", // default behavior for LLM Prompt node was, continue its execution even though an error occurred (deviating it from the SEO node) & do not output an error message on UI explicitly. However, error is always stored in the input or context object. We can use an extra "say" node to output it.
+ const { temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, streamStopTokens, streamStopTokenOverrides, debugLogTokenCount, debugLogRequestAndCompletion, debugLogLLMLatency, debugLogToolDefinitions, llmProviderReferenceId, usePromptMode, chatTranscriptSteps, responseFormat, streamStoreCopyInInput, seed, immediateOutput, customModelOptions, customRequestOptions, errorHandling = "continue", // default behavior for LLM Prompt node was, continue its execution even though an error occurred (deviating it from the SEO node) & do not output an error message on UI explicitly. However, error is always stored in the input or context object. We can use an extra "say" node to output it.
  errorHandlingGotoTarget, errorMessage, logErrorToSystem, processImages, transcriptImageHandling, toolChoice, useStrict } = config;
  let prompt = config.prompt || "";
  const { traceId } = input;
@@ -645,17 +653,17 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  }
  // handle errors from external services, depending on the settings
  const handleServiceError = (error) => __awaiter(void 0, void 0, void 0, function* () {
- var _v, _w, _x, _y, _z, _0;
+ var _w, _x, _y, _z, _0, _1;
  const compactError = {
  name: error === null || error === void 0 ? void 0 : error.name,
  code: error === null || error === void 0 ? void 0 : error.code,
  message: (error === null || error === void 0 ? void 0 : error.message) || error
  };
  // return the requestId if it exist in the error obj.
- if ((_v = error === null || error === void 0 ? void 0 : error.meta) === null || _v === void 0 ? void 0 : _v.requestId) {
- compactError["requestId"] = (_w = error === null || error === void 0 ? void 0 : error.meta) === null || _w === void 0 ? void 0 : _w.requestId;
+ if ((_w = error === null || error === void 0 ? void 0 : error.meta) === null || _w === void 0 ? void 0 : _w.requestId) {
+ compactError["requestId"] = (_x = error === null || error === void 0 ? void 0 : error.meta) === null || _x === void 0 ? void 0 : _x.requestId;
  }
- if ((_x = error === null || error === void 0 ? void 0 : error.originalErrorDetails) === null || _x === void 0 ? void 0 : _x.code) {
+ if ((_y = error === null || error === void 0 ? void 0 : error.originalErrorDetails) === null || _y === void 0 ? void 0 : _y.code) {
  compactError.code = error.originalErrorDetails.code;
  }
  const errorResponse = {
@@ -664,7 +672,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  // add error to context or input
  switch (storeLocation) {
  case "context":
- (_y = api.addToContext) === null || _y === void 0 ? void 0 : _y.call(api, contextKey, errorResponse, "simple");
+ (_z = api.addToContext) === null || _z === void 0 ? void 0 : _z.call(api, contextKey, errorResponse, "simple");
  break;
  default:
  api.addToInput(inputKey, errorResponse);
@@ -672,7 +680,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  if (errorHandling === "continue") {
  // output the timeout message
  if (errorMessage) {
- yield ((_z = api.output) === null || _z === void 0 ? void 0 : _z.call(api, errorMessage, null));
+ yield ((_0 = api.output) === null || _0 === void 0 ? void 0 : _0.call(api, errorMessage, null));
  }
  // Continue with default node as next node
  const defaultChild = childConfigs === null || childConfigs === void 0 ? void 0 : childConfigs.find(child => child.type === "llmPromptDefault");
@@ -699,7 +707,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  absorbContext: false
  }
  };
- yield ((_0 = GO_TO.function) === null || _0 === void 0 ? void 0 : _0.call(GO_TO, gotoParams));
+ yield ((_1 = GO_TO.function) === null || _1 === void 0 ? void 0 : _1.call(GO_TO, gotoParams));
  }
  else {
  throw new InternalServerError(error === null || error === void 0 ? void 0 : error.message, { traceId });
@@ -708,6 +716,8 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  try {
  const isStreamingChannel = input.channel === "webchat3" || input.channel === "adminconsole";
  const _messageId = randomUUID();
+ // Start measuring LLM latency and time to first output if debug flag is enabled
+ let firstOutputTime = null;
  /**
  * Retrieve the tool definitions from the child nodes
  */
@@ -724,6 +734,10 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  var _a;
  text = isStreamingChannel ? text : text.trim();
  if (text) {
+ // Record first output time for debugging if not already recorded
+ if (debugLogLLMLatency && firstOutputTime === null) {
+ firstOutputTime = Date.now();
+ }
  // if we got text, we output it, but prevent it from being added to the transcript
  (_a = api.output) === null || _a === void 0 ? void 0 : _a.call(api, text, {
  _cognigy: {
@@ -767,15 +781,38 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  content: prompt
  }];
  }
+ const llmStartTime = debugLogLLMLatency ? Date.now() : 0;
  // Run the LLM Query
  const fullLlmResult = yield api.runGenerativeAIPrompt(llmPromptOptions, "gptPromptNode");
+ // End measuring times and log if debug flag is enabled
+ if (debugLogLLMLatency) {
+ const llmEndTime = Date.now();
+ const debugMessages = [];
+ const llmLatencyMs = llmEndTime - llmStartTime;
+ let timeToFirstOutputLabel;
+ if (fullLlmResult.finishReason === "tool_calls" && fullLlmResult.toolCalls.length > 0) {
+ timeToFirstOutputLabel = " - (tool call)";
+ }
+ else if (firstOutputTime === null) {
+ timeToFirstOutputLabel = " - (no output)";
+ }
+ else {
+ firstOutputTime = firstOutputTime || llmEndTime;
+ timeToFirstOutputLabel = `${firstOutputTime - llmStartTime}ms`;
+ }
+ if (storeLocation === "stream") {
+ debugMessages.push(`UI__DEBUG_MODE__AI_AGENT_JOB__TIME_TO_FIRST_OUTPUT__LABEL: ${timeToFirstOutputLabel}`);
+ }
+ debugMessages.push(`UI__DEBUG_MODE__AI_AGENT_JOB__LLM_LATENCY__LABEL: ${llmLatencyMs}ms`);
+ (_c = api.logDebugMessage) === null || _c === void 0 ? void 0 : _c.call(api, debugMessages.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TIMING__HEADER");
+ }
  const { messages } = fullLlmResult, llmResult = __rest(fullLlmResult, ["messages"]);
  const isFollowSessionActive = api.getMetadata().isFollowSessionActive;
  if (debugLogToolDefinitions) {
- (_c = api.logDebugMessage) === null || _c === void 0 ? void 0 : _c.call(api, tools, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_DEFINITIONS");
+ (_d = api.logDebugMessage) === null || _d === void 0 ? void 0 : _d.call(api, tools, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_DEFINITIONS");
  }
  // if we're in adminconsole or following a session, process debugging options
- (input.endpointType === "adminconsole" || isFollowSessionActive) && writeLLMDebugLogs("LLM Prompt", debugPrompt, llmResult, debugLogTokenCount, debugLogRequestAndCompletion, cognigy);
+ (input.endpointType === "adminconsole" || isFollowSessionActive) && writeLLMDebugLogs("LLM Prompt", debugPrompt, llmResult, debugLogTokenCount, debugLogRequestAndCompletion, cognigy, "llmPromptV2");
  if (llmResult.finishReason === "tool_calls" && llmResult.toolCalls.length > 0) {
  const mainToolCall = llmResult.toolCalls[0];
  let isMcpToolCall = false;
@@ -787,7 +824,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  isMcpToolCall = true;
  }
  if (mainToolCall.function.name !== "retrieve_knowledge" && toolChild === undefined) {
- (_d = api.logDebugError) === null || _d === void 0 ? void 0 : _d.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
+ (_e = api.logDebugError) === null || _e === void 0 ? void 0 : _e.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
  }
  // Add last tool call to session state for loading it from Tool Answer Node
  api.updateSessionStateValues({
@@ -795,20 +832,20 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  flow: flowReferenceId,
  node: nodeId,
  } }, (isMcpToolCall && {
- mcpServerUrl: (_e = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _e === void 0 ? void 0 : _e.mcpServerUrl,
- timeout: (_f = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _f === void 0 ? void 0 : _f.timeout,
+ mcpServerUrl: (_f = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _f === void 0 ? void 0 : _f.mcpServerUrl,
+ timeout: (_g = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _g === void 0 ? void 0 : _g.timeout,
  mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
  })), { toolCall: mainToolCall }),
  });
  // if there are any parameters/arguments, add them to the input slots
  if (mainToolCall.function.arguments) {
- input.llmPrompt = Object.assign(Object.assign({}, input.llmPrompt), { toolArgs: Object.assign(Object.assign({}, (_h = (_g = input.llmPrompt) === null || _g === void 0 ? void 0 : _g.toolArgs) !== null && _h !== void 0 ? _h : {}), mainToolCall.function.arguments) });
+ input.llmPrompt = Object.assign(Object.assign({}, input.llmPrompt), { toolArgs: Object.assign(Object.assign({}, (_j = (_h = input.llmPrompt) === null || _h === void 0 ? void 0 : _h.toolArgs) !== null && _j !== void 0 ? _j : {}), mainToolCall.function.arguments) });
  }
  // Debug Message for Tool Calls, configured in the Tool Node
- if ((_j = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _j === void 0 ? void 0 : _j.debugMessage) {
+ if ((_k = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _k === void 0 ? void 0 : _k.debugMessage) {
  const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${api.parseCognigyScriptText(toolChild.config.toolId)}`];
  // Arguments / Parameters Slots
- const slots = ((_k = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _k === void 0 ? void 0 : _k.arguments) && Object.keys(mainToolCall.function.arguments);
+ const slots = ((_l = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _l === void 0 ? void 0 : _l.arguments) && Object.keys(mainToolCall.function.arguments);
  const hasSlots = slots && slots.length > 0;
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
  if (hasSlots) {
@@ -823,7 +860,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  messageLines.push(`- ${slot}: ${slotValueAsString}`);
  });
  }
- (_l = api.logDebugMessage) === null || _l === void 0 ? void 0 : _l.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
+ (_m = api.logDebugMessage) === null || _m === void 0 ? void 0 : _m.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
  }
  if (toolChild) {
  api.setNextNode(toolChild.id);
@@ -848,11 +885,11 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  // we stringify objects (e.g. results coming from JSON Mode)
  // so that the transcript only contains text
  const resultToOutput = typeof ((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult) === "object" ? JSON.stringify((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult, undefined, 2) : (llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult;
- yield ((_m = api.output) === null || _m === void 0 ? void 0 : _m.call(api, resultToOutput, {}));
+ yield ((_o = api.output) === null || _o === void 0 ? void 0 : _o.call(api, resultToOutput, {}));
  }
  else if (llmResult.finishReason && llmPromptOptions.stream) {
  // send the finishReason as last output for a stream
- (_o = api.output) === null || _o === void 0 ? void 0 : _o.call(api, "", {
+ (_p = api.output) === null || _p === void 0 ? void 0 : _p.call(api, "", {
  _cognigy: {
  _preventTranscript: true,
  _messageId,
@@ -875,7 +912,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  }
  // Add response to Cognigy Input/Context for further usage
  if (storeLocation === "context") {
- (_p = api.addToContext) === null || _p === void 0 ? void 0 : _p.call(api, contextKey, llmResult, "simple");
+ (_q = api.addToContext) === null || _q === void 0 ? void 0 : _q.call(api, contextKey, llmResult, "simple");
  }
  else if (storeLocation === "input") {
  api.addToInput(inputKey, llmResult);
@@ -888,19 +925,19 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
  const errorDetailsBase = {
  name: error === null || error === void 0 ? void 0 : error.name,
  code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
- message: (error === null || error === void 0 ? void 0 : error.message) || ((_q = error.originalErrorDetails) === null || _q === void 0 ? void 0 : _q.message),
+ message: (error === null || error === void 0 ? void 0 : error.message) || ((_r = error.originalErrorDetails) === null || _r === void 0 ? void 0 : _r.message),
  };
  const errorDetails = Object.assign(Object.assign({}, errorDetailsBase), { originalErrorDetails: error === null || error === void 0 ? void 0 : error.originalErrorDetails });
  // return the requestId if it exist in the error obj.
- if ((_r = error.meta) === null || _r === void 0 ? void 0 : _r.requestId) {
+ if ((_s = error.meta) === null || _s === void 0 ? void 0 : _s.requestId) {
  errorDetails["meta"] = {
- requestId: (_s = error.meta) === null || _s === void 0 ? void 0 : _s.requestId
+ requestId: (_t = error.meta) === null || _t === void 0 ? void 0 : _t.requestId
  };
  }
  if (logErrorToSystem) {
- (_t = api.log) === null || _t === void 0 ? void 0 : _t.call(api, "error", JSON.stringify(errorDetailsBase));
+ (_u = api.log) === null || _u === void 0 ? void 0 : _u.call(api, "error", JSON.stringify(errorDetailsBase));
  }
- (_u = api.logDebugError) === null || _u === void 0 ? void 0 : _u.call(api, errorDetailsBase, "UI__DEBUG_MODE__LLM_PROMPT__ERROR");
+ (_v = api.logDebugError) === null || _v === void 0 ? void 0 : _v.call(api, errorDetailsBase, "UI__DEBUG_MODE__LLM_PROMPT__ERROR");
  yield handleServiceError(errorDetails);
  return;
  }
@@ -33,6 +33,9 @@ export const createHandoverRequestDataSchema = {
  "type": "string",
  "format": "mongo-id",
  },
+ "locale": {
+ "type": "string",
+ },
  "userId": {
  "type": "string"
  },
@@ -195,6 +198,9 @@ export const sendMessageToProviderSchema = {
  "type": "string",
  "format": "mongo-id",
  },
+ "locale": {
+ "type": "string",
+ },
  "userId": {
  "type": "string"
  },
@@ -0,0 +1,6 @@
+ export var SuccessCriterionType;
+ (function (SuccessCriterionType) {
+ SuccessCriterionType["TEXT"] = "text";
+ SuccessCriterionType["GOAL_COMPLETED"] = "goalCompleted";
+ })(SuccessCriterionType || (SuccessCriterionType = {}));
+ //# sourceMappingURL=ISimulation.js.map
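
The new ISimulation module exports a SuccessCriterionType enum whose compiled form is a plain string-valued lookup object, so its two members can be compared directly against string values. A minimal sketch, assuming the enum is imported from this package's ISimulation module:

console.log(SuccessCriterionType.TEXT);                               // "text"
console.log(SuccessCriterionType.GOAL_COMPLETED);                     // "goalCompleted"
console.log(SuccessCriterionType.GOAL_COMPLETED === "goalCompleted"); // true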