@cognigy/rest-api-client 2025.19.0 → 2025.20.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. package/CHANGELOG.md +5 -0
  2. package/build/apigroups/InsightsAPIGroup_2_0.js +10 -10
  3. package/build/apigroups/MetricsAPIGroup_2_0.js +4 -0
  4. package/build/shared/charts/descriptors/nlu/fuzzySearch.js +6 -6
  5. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +81 -21
  6. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobCallMCPTool.js +7 -5
  7. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js +8 -1
  8. package/build/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +2 -0
  9. package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +78 -18
  10. package/build/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +8 -1
  11. package/build/shared/interfaces/IOrganisation.js +1 -0
  12. package/build/shared/interfaces/resources/IAuditEvent.js +2 -1
  13. package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeSource.js +1 -1
  14. package/build/shared/interfaces/restAPI/metrics/logs/v2.0/ITailLogEntriesRest_2_0.js +3 -0
  15. package/dist/esm/apigroups/InsightsAPIGroup_2_0.js +10 -10
  16. package/dist/esm/apigroups/MetricsAPIGroup_2_0.js +4 -0
  17. package/dist/esm/shared/charts/descriptors/nlu/fuzzySearch.js +6 -6
  18. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +81 -21
  19. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobCallMCPTool.js +7 -5
  20. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js +8 -1
  21. package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +2 -0
  22. package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +85 -25
  23. package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +8 -1
  24. package/dist/esm/shared/interfaces/IOrganisation.js +1 -0
  25. package/dist/esm/shared/interfaces/resources/IAuditEvent.js +2 -1
  26. package/dist/esm/shared/interfaces/resources/knowledgeStore/IKnowledgeSource.js +1 -1
  27. package/dist/esm/shared/interfaces/restAPI/metrics/logs/v2.0/ITailLogEntriesRest_2_0.js +2 -0
  28. package/package.json +1 -1
  29. package/types/index.d.ts +48 -2
package/CHANGELOG.md CHANGED
@@ -1,3 +1,8 @@
1
+ # 2025.20.0
2
+ Released: October 02nd, 2025
3
+
4
+ Released state of package up to date with Cognigy.AI v2025.20.0
5
+
1
6
  # 2025.19.0
2
7
  Released: September 16th, 2025
3
8
 
@@ -17,12 +17,12 @@ const rest_1 = require("../shared/helper/rest");
17
17
  function InsightsAPIGroup_2_0(instance) {
18
18
  const self = instance;
19
19
  return {
20
- indexSteps: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)(`/new/insights/beta/steps?${(0, rest_1.stringifyQuery)(args)}`, "GET", self)(undefined, options),
21
- generateReport: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)("/new/insights/beta/reports/generate", "POST", self)(args, options),
22
- loadReportByQueryHash: ({ queryHash }, options) => (0, GenericAPIFn_1.GenericAPIFn)(`/new/insights/beta/reports/${queryHash}`, "GET", self)(undefined, options),
20
+ indexSteps: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)(`/v1.0/insights/steps?${(0, rest_1.stringifyQuery)(args)}`, "GET", self)(undefined, options),
21
+ generateReport: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)("/v1.0/insights/reports/generate", "POST", self)(args, options),
22
+ loadReportByQueryHash: ({ queryHash }, options) => (0, GenericAPIFn_1.GenericAPIFn)(`/v1.0/insights/reports/${queryHash}`, "GET", self)(undefined, options),
23
23
  generateMessagesReport: (_a, options) => {
24
24
  var { search, skip, limit, next, previous, sort, messageType } = _a, args = __rest(_a, ["search", "skip", "limit", "next", "previous", "sort", "messageType"]);
25
- return (0, GenericAPIFn_1.GenericAPIFn)(`/new/insights/beta/messages/report/generate?${(0, rest_1.stringifyQuery)({
25
+ return (0, GenericAPIFn_1.GenericAPIFn)(`/v1.0/insights/messages/report/generate?${(0, rest_1.stringifyQuery)({
26
26
  search,
27
27
  skip,
28
28
  limit,
@@ -32,12 +32,12 @@ function InsightsAPIGroup_2_0(instance) {
32
32
  messageType
33
33
  })}`, "POST", self)(args, options);
34
34
  },
35
- loadMessagesReportByQueryHash: ({ queryHash }, options) => (0, GenericAPIFn_1.GenericAPIFn)(`/new/insights/beta/messages/report/${queryHash}`, "GET", self)(undefined, options),
36
- generateStepReport: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)("/new/insights/beta/steps/report/generate", "POST", self)(args, options),
37
- loadStepReportByQueryHash: ({ queryHash }, options) => (0, GenericAPIFn_1.GenericAPIFn)(`/new/insights/beta/steps/report/${queryHash}`, "GET", self)(undefined, options),
35
+ loadMessagesReportByQueryHash: ({ queryHash }, options) => (0, GenericAPIFn_1.GenericAPIFn)(`/v1.0/insights/messages/report/${queryHash}`, "GET", self)(undefined, options),
36
+ generateStepReport: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)("/v1.0/insights/steps/report/generate", "POST", self)(args, options),
37
+ loadStepReportByQueryHash: ({ queryHash }, options) => (0, GenericAPIFn_1.GenericAPIFn)(`/v1.0/insights/steps/report/${queryHash}`, "GET", self)(undefined, options),
38
38
  generateTranscriptsReport: (_a, options) => {
39
39
  var { search, skip, limit, next, previous, sort } = _a, args = __rest(_a, ["search", "skip", "limit", "next", "previous", "sort"]);
40
- return (0, GenericAPIFn_1.GenericAPIFn)(`/new/insights/beta/transcripts/report/generate?${(0, rest_1.stringifyQuery)({
40
+ return (0, GenericAPIFn_1.GenericAPIFn)(`/v1.0/insights/transcripts/report/generate?${(0, rest_1.stringifyQuery)({
41
41
  search,
42
42
  skip,
43
43
  limit,
@@ -46,12 +46,12 @@ function InsightsAPIGroup_2_0(instance) {
46
46
  sort
47
47
  })}`, "POST", self)(args, options);
48
48
  },
49
- loadTranscriptsReportByQueryHash: ({ queryHash }, options) => (0, GenericAPIFn_1.GenericAPIFn)(`/new/insights/beta/transcripts/report/${queryHash}`, "GET", self)(undefined, options),
49
+ loadTranscriptsReportByQueryHash: ({ queryHash }, options) => (0, GenericAPIFn_1.GenericAPIFn)(`/v1.0/insights/transcripts/report/${queryHash}`, "GET", self)(undefined, options),
50
50
  deleteAnalyticsRecords: (_a, options) => {
51
51
  var { projectId } = _a, restArgs = __rest(_a, ["projectId"]);
52
52
  return (0, GenericAPIFn_1.GenericAPIFn)(`/v2.0/analytics/${projectId}`, "DELETE", self)(restArgs, options);
53
53
  },
54
- insightsJWT: (options) => (0, GenericAPIFn_1.GenericAPIFn)("/new/insights/auth", "POST", self)(undefined, options)
54
+ insightsJWT: (options) => (0, GenericAPIFn_1.GenericAPIFn)("/insights/auth", "POST", self)(undefined, options)
55
55
  };
56
56
  }
57
57
  exports.InsightsAPIGroup_2_0 = InsightsAPIGroup_2_0;
@@ -25,6 +25,10 @@ function MetricsAPIGroup_2_0(instance) {
25
25
  var { projectId } = _a, args = __rest(_a, ["projectId"]);
26
26
  return (0, GenericAPIFn_1.GenericAPIFn)(`/new/v2.0/projects/${projectId}/logs?${(0, query_1.stringifyQuery)(args)}`, "GET", self)(undefined, options);
27
27
  },
28
+ tailLogEntries: (_a, options) => {
29
+ var { projectId } = _a, args = __rest(_a, ["projectId"]);
30
+ return (0, GenericAPIFn_1.GenericAPIFn)(`/new/v2.0/projects/${projectId}/logs/tail?${(0, query_1.stringifyQuery)(args)}`, "GET", self)(undefined, options);
31
+ },
28
32
  readLogEntry: ({ projectId, logEntryId }, options) => (0, GenericAPIFn_1.GenericAPIFn)(`/new/v2.0/projects/${projectId}/logs/${logEntryId}}`, "GET", self)(undefined, options),
29
33
  indexProfiles: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)(`/new/v2.0/profiles?${(0, query_1.stringifyQuery)(args)}`, "GET", self)(undefined, options),
30
34
  createProfile: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)("/new/v2.0/profiles", "POST", self)(args, options),
@@ -33,12 +33,12 @@ exports.FUZZY_SEARCH = (0, createNodeDescriptor_1.createNodeDescriptor)({
33
33
  params: {
34
34
  required: true
35
35
  },
36
- defaultValue: `[
37
- "apple",
38
- "orange",
39
- "banana",
40
- "pear"
41
- ]`
36
+ defaultValue: `{
37
+ "$cs":{
38
+ "script":"context.names",
39
+ "type":"array"
40
+ }
41
+ }`
42
42
  },
43
43
  {
44
44
  key: "isCaseSensitive",
@@ -428,6 +428,57 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
428
428
  description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__INCLUDE_ALL_OUTPUT_TYPES__DESCRIPTION",
429
429
  defaultValue: true,
430
430
  },
431
+ {
432
+ key: "advancedLogging",
433
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__ADVANCED_LOGGING__LABEL",
434
+ type: "toggle",
435
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__ADVANCED_LOGGING__DESCRIPTION",
436
+ defaultValue: false,
437
+ },
438
+ {
439
+ key: "loggingWebhookUrl",
440
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOGGING_WEBHOOK_URL__LABEL",
441
+ type: "cognigyText",
442
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOGGING_WEBHOOK_URL__DESCRIPTION",
443
+ defaultValue: "",
444
+ condition: {
445
+ key: "advancedLogging",
446
+ value: true
447
+ }
448
+ },
449
+ {
450
+ key: "loggingCustomData",
451
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__CUSTOM_LOGGING_DATA__LABEL",
452
+ type: "cognigyText",
453
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__CUSTOM_LOGGING_DATA__DESCRIPTION",
454
+ defaultValue: "",
455
+ condition: {
456
+ key: "advancedLogging",
457
+ value: true
458
+ }
459
+ },
460
+ {
461
+ key: "loggingHeaders",
462
+ type: "keyValuePairs",
463
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOGGING_HEADERS__LABEL",
464
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOGGING_HEADERS__DESCRIPTION",
465
+ defaultValue: "{}",
466
+ condition: {
467
+ key: "advancedLogging",
468
+ value: true
469
+ }
470
+ },
471
+ {
472
+ key: "conditionForLogging",
473
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__CONDITION_FOR_LOGGING__LABEL",
474
+ type: "cognigyText",
475
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__CONDITION_FOR_LOGGING__DESCRIPTION",
476
+ defaultValue: "",
477
+ condition: {
478
+ key: "advancedLogging",
479
+ value: true
480
+ }
481
+ },
431
482
  {
432
483
  key: "logErrorToSystem",
433
484
  label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_TO_SYSTEM__LABEL",
@@ -823,7 +874,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
823
874
  "timeoutInMs",
824
875
  "maxTokens",
825
876
  "temperature",
826
- "useTextAlternativeForLLM",
877
+ "useTextAlternativeForLLM"
827
878
  ],
828
879
  },
829
880
  {
@@ -848,7 +899,12 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
848
899
  "debugLogTokenCount",
849
900
  "debugLogSystemPrompt",
850
901
  "debugLogToolDefinitions",
851
- "debugLogLLMLatency"
902
+ "debugLogLLMLatency",
903
+ "advancedLogging",
904
+ "loggingWebhookUrl",
905
+ "loggingCustomData",
906
+ "conditionForLogging",
907
+ "loggingHeaders",
852
908
  ],
853
909
  }
854
910
  ],
@@ -869,9 +925,9 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
869
925
  ],
870
926
  tags: ["ai", "aiAgent"],
871
927
  function: async ({ cognigy, config, childConfigs, nodeId }) => {
872
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23;
928
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24;
873
929
  const { api, context, input, profile, flowReferenceId } = cognigy;
874
- const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, useTextAlternativeForLLM, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, debugLogLLMLatency, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
930
+ const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, useTextAlternativeForLLM, advancedLogging, loggingWebhookUrl, loggingCustomData, conditionForLogging, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, debugLogLLMLatency, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, loggingHeaders, sessionParams } = config;
875
931
  try {
876
932
  if (!aiAgent) {
877
933
  throw new Error("Could not resolve AI Agent reference in AI Agent Node");
@@ -919,7 +975,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
919
975
  throw new Error(`[VG2] Error on AI Agent Job node. Error message: ${error.message}`);
920
976
  }
921
977
  }
922
- const _24 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _24, cleanedProfile = __rest(_24, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
978
+ const _25 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _25, cleanedProfile = __rest(_25, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
923
979
  const userMemory = (0, getUserMemory_1.getUserMemory)(memoryType, selectedProfileFields, aiAgent, cleanedProfile);
924
980
  /**
925
981
  * ----- Knowledge Search Section -----
@@ -1164,7 +1220,8 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
1164
1220
  }
1165
1221
  const isStreamingChannel = input.channel === "webchat3" || input.channel === "adminconsole";
1166
1222
  const _messageId = (0, uuid_1.v4)();
1167
- const llmPromptOptions = Object.assign(Object.assign({ prompt: "", chat: systemMessage,
1223
+ const enableAdvancedLogging = advancedLogging && loggingWebhookUrl && (conditionForLogging === "" || !!conditionForLogging);
1224
+ const llmPromptOptions = Object.assign(Object.assign(Object.assign({ prompt: "", chat: systemMessage,
1168
1225
  // Temp fix to override the transcript if needed
1169
1226
  transcript: ((_0 = context === null || context === void 0 ? void 0 : context._cognigy) === null || _0 === void 0 ? void 0 : _0.transcript) ? [...context._cognigy.transcript] : transcript, detailedResults: true, timeoutInMs: timeoutInMs !== null && timeoutInMs !== void 0 ? timeoutInMs : 8000, maxTokens: maxTokens !== null && maxTokens !== void 0 ? maxTokens : 4000, temperature: temperature !== null && temperature !== void 0 ? temperature : 0.7, topP: 1, frequencyPenalty: 0, presencePenalty: 0, responseFormat: "text", stream: storeLocation === "stream", streamOnDataHandler: (text) => {
1170
1227
  var _a;
@@ -1182,7 +1239,9 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
1182
1239
  }
1183
1240
  });
1184
1241
  }
1185
- }, streamStopTokens: streamStopTokens || [".", "!", "?", "\\n"], preventNewLineRemoval: isStreamingChannel ? true : false }, (tools.length > 0 && { tools })), (tools.length > 0 && { toolChoice: toolChoice }));
1242
+ }, streamStopTokens: streamStopTokens || [".", "!", "?", "\\n"], preventNewLineRemoval: isStreamingChannel ? true : false }, (tools.length > 0 && { tools })), (tools.length > 0 && { toolChoice: toolChoice })), (enableAdvancedLogging && {
1243
+ logging: Object.assign(Object.assign({ webhookUrl: loggingWebhookUrl }, (loggingCustomData && { customData: loggingCustomData })), (loggingHeaders && { headers: loggingHeaders }))
1244
+ }));
1186
1245
  // llmProviderReferenceId `default` value is not a responseFormat, rather it is LLM Model default selection.
1187
1246
  if (llmProviderReferenceId && llmProviderReferenceId !== "default") {
1188
1247
  llmPromptOptions["llmProviderReferenceId"] = llmProviderReferenceId;
@@ -1257,20 +1316,21 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
1257
1316
  node: nodeId,
1258
1317
  } }, (isMcpToolCall && {
1259
1318
  mcpServerUrl: (_8 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _8 === void 0 ? void 0 : _8.mcpServerUrl,
1260
- timeout: (_9 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _9 === void 0 ? void 0 : _9.timeout,
1319
+ mcpHeaders: (_9 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _9 === void 0 ? void 0 : _9.mcpHeaders,
1320
+ timeout: (_10 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _10 === void 0 ? void 0 : _10.timeout,
1261
1321
  mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
1262
1322
  })), { toolCall: mainToolCall }),
1263
1323
  });
1264
1324
  // if there are any parameters/arguments, add them to the input slots
1265
1325
  if (mainToolCall.function.arguments) {
1266
- input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_11 = (_10 = input.aiAgent) === null || _10 === void 0 ? void 0 : _10.toolArgs) !== null && _11 !== void 0 ? _11 : {}), mainToolCall.function.arguments) });
1326
+ input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_12 = (_11 = input.aiAgent) === null || _11 === void 0 ? void 0 : _11.toolArgs) !== null && _12 !== void 0 ? _12 : {}), mainToolCall.function.arguments) });
1267
1327
  }
1268
1328
  // Debug Message for Tool Calls, configured in the Tool Node
1269
- if ((_12 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _12 === void 0 ? void 0 : _12.debugMessage) {
1329
+ if ((_13 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _13 === void 0 ? void 0 : _13.debugMessage) {
1270
1330
  const toolId = isMcpToolCall ? mainToolCall.function.name : await api.parseCognigyScriptText(toolChild.config.toolId);
1271
1331
  const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${toolId}`];
1272
1332
  // Arguments / Parameters Slots
1273
- const slots = ((_13 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _13 === void 0 ? void 0 : _13.arguments) && Object.keys(mainToolCall.function.arguments);
1333
+ const slots = ((_14 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _14 === void 0 ? void 0 : _14.arguments) && Object.keys(mainToolCall.function.arguments);
1274
1334
  const hasSlots = slots && slots.length > 0;
1275
1335
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
1276
1336
  if (hasSlots) {
@@ -1285,7 +1345,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
1285
1345
  messageLines.push(`- ${slot}: ${slotValueAsString}`);
1286
1346
  });
1287
1347
  }
1288
- (_14 = api.logDebugMessage) === null || _14 === void 0 ? void 0 : _14.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
1348
+ (_15 = api.logDebugMessage) === null || _15 === void 0 ? void 0 : _15.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
1289
1349
  }
1290
1350
  if (toolChild) {
1291
1351
  api.setNextNode(toolChild.id);
@@ -1310,11 +1370,11 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
1310
1370
  }
1311
1371
  // Optionally output the result immediately
1312
1372
  if (llmResult.result && outputImmediately && !llmPromptOptions.stream) {
1313
- await ((_15 = api.output) === null || _15 === void 0 ? void 0 : _15.call(api, llmResult.result, {}));
1373
+ await ((_16 = api.output) === null || _16 === void 0 ? void 0 : _16.call(api, llmResult.result, {}));
1314
1374
  }
1315
1375
  else if (llmResult.finishReason && llmPromptOptions.stream) {
1316
1376
  // send the finishReason as last output for a stream
1317
- (_16 = api.output) === null || _16 === void 0 ? void 0 : _16.call(api, "", {
1377
+ (_17 = api.output) === null || _17 === void 0 ? void 0 : _17.call(api, "", {
1318
1378
  _cognigy: {
1319
1379
  _preventTranscript: true,
1320
1380
  _messageId,
@@ -1337,7 +1397,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
1337
1397
  }
1338
1398
  // Add response to Cognigy Input/Context for further usage
1339
1399
  if (storeLocation === "context") {
1340
- (_17 = api.addToContext) === null || _17 === void 0 ? void 0 : _17.call(api, contextKey, llmResult, "simple");
1400
+ (_18 = api.addToContext) === null || _18 === void 0 ? void 0 : _18.call(api, contextKey, llmResult, "simple");
1341
1401
  }
1342
1402
  else if (storeLocation === "input") {
1343
1403
  api.addToInput(inputKey, llmResult);
@@ -1350,14 +1410,14 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
1350
1410
  const errorDetails = {
1351
1411
  name: (error === null || error === void 0 ? void 0 : error.name) || "Error",
1352
1412
  code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
1353
- message: (error === null || error === void 0 ? void 0 : error.message) || ((_18 = error.originalErrorDetails) === null || _18 === void 0 ? void 0 : _18.message),
1413
+ message: (error === null || error === void 0 ? void 0 : error.message) || ((_19 = error.originalErrorDetails) === null || _19 === void 0 ? void 0 : _19.message),
1354
1414
  };
1355
- (_19 = api.emitEvent) === null || _19 === void 0 ? void 0 : _19.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
1415
+ (_20 = api.emitEvent) === null || _20 === void 0 ? void 0 : _20.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
1356
1416
  if (logErrorToSystem) {
1357
- (_20 = api.log) === null || _20 === void 0 ? void 0 : _20.call(api, "error", JSON.stringify(errorDetails));
1417
+ (_21 = api.log) === null || _21 === void 0 ? void 0 : _21.call(api, "error", JSON.stringify(errorDetails));
1358
1418
  }
1359
1419
  if (errorHandling !== "stop") {
1360
- (_21 = api.logDebugError) === null || _21 === void 0 ? void 0 : _21.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
1420
+ (_22 = api.logDebugError) === null || _22 === void 0 ? void 0 : _22.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
1361
1421
  }
1362
1422
  if (storeErrorInInput) {
1363
1423
  input.aiAgent = input.aiAgent || {};
@@ -1366,7 +1426,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
1366
1426
  if (errorHandling === "continue") {
1367
1427
  // output the timeout message
1368
1428
  if (errorMessage) {
1369
- await ((_22 = api.output) === null || _22 === void 0 ? void 0 : _22.call(api, errorMessage, null));
1429
+ await ((_23 = api.output) === null || _23 === void 0 ? void 0 : _23.call(api, errorMessage, null));
1370
1430
  }
1371
1431
  // Set default node as next node
1372
1432
  const defaultChild = childConfigs.find(child => child.type === "aiAgentJobDefault");
@@ -1378,7 +1438,7 @@ exports.AI_AGENT_JOB = (0, createNodeDescriptor_1.createNodeDescriptor)({
1378
1438
  if (!errorHandlingGotoTarget) {
1379
1439
  throw new Error("GoTo Target is required");
1380
1440
  }
1381
- if (!((_23 = api.checkThink) === null || _23 === void 0 ? void 0 : _23.call(api, nodeId))) {
1441
+ if (!((_24 = api.checkThink) === null || _24 === void 0 ? void 0 : _24.call(api, nodeId))) {
1382
1442
  api.resetNextNodes();
1383
1443
  await api.executeFlow({
1384
1444
  flowNode: {
@@ -94,7 +94,7 @@ exports.AI_AGENT_JOB_CALL_MCP_TOOL = (0, createNodeDescriptor_1.createNodeDescri
94
94
  },
95
95
  tags: ["ai", "aiAgent"],
96
96
  function: async ({ cognigy, config }) => {
97
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k;
97
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l;
98
98
  const { api } = cognigy;
99
99
  const { storeLocation, contextKey, inputKey, resolveImmediately, debugToolResult } = config;
100
100
  const sessionState = await api.loadSessionState();
@@ -102,8 +102,9 @@ exports.AI_AGENT_JOB_CALL_MCP_TOOL = (0, createNodeDescriptor_1.createNodeDescri
102
102
  const aiAgentJobNode = (_b = sessionState.lastToolCall) === null || _b === void 0 ? void 0 : _b.aiAgentJobNode;
103
103
  const mcpServerUrl = (_c = sessionState.lastToolCall) === null || _c === void 0 ? void 0 : _c.mcpServerUrl;
104
104
  const timeout = (_d = sessionState.lastToolCall) === null || _d === void 0 ? void 0 : _d.timeout;
105
+ const mcpHeaders = (_e = sessionState.lastToolCall) === null || _e === void 0 ? void 0 : _e.mcpHeaders;
105
106
  if (!(toolCall === null || toolCall === void 0 ? void 0 : toolCall.id)) {
106
- (_e = api.logDebugError) === null || _e === void 0 ? void 0 : _e.call(api, "UI__DEBUG_MODE__AI_AGENT_ANSWER__ERROR__MESSAGE");
107
+ (_f = api.logDebugError) === null || _f === void 0 ? void 0 : _f.call(api, "UI__DEBUG_MODE__AI_AGENT_ANSWER__ERROR__MESSAGE");
107
108
  }
108
109
  if (toolCall && aiAgentJobNode && mcpServerUrl && timeout) {
109
110
  let toolResult = null;
@@ -114,10 +115,11 @@ exports.AI_AGENT_JOB_CALL_MCP_TOOL = (0, createNodeDescriptor_1.createNodeDescri
114
115
  toolName: toolCall.function.name,
115
116
  toolArgs: toolCall.function.arguments,
116
117
  timeout,
118
+ mcpHeaders: mcpHeaders !== null && mcpHeaders !== void 0 ? mcpHeaders : {},
117
119
  });
118
120
  fullResult = JSON.stringify(toolResult, null, 2);
119
121
  if (debugToolResult) {
120
- (_f = api.logDebugMessage) === null || _f === void 0 ? void 0 : _f.call(api, `Tool <b>${(_g = toolCall === null || toolCall === void 0 ? void 0 : toolCall.function) === null || _g === void 0 ? void 0 : _g.name}</b> called successfully.<br><br><b>Result:</b><br>${fullResult}`);
122
+ (_g = api.logDebugMessage) === null || _g === void 0 ? void 0 : _g.call(api, `Tool <b>${(_h = toolCall === null || toolCall === void 0 ? void 0 : toolCall.function) === null || _h === void 0 ? void 0 : _h.name}</b> called successfully.<br><br><b>Result:</b><br>${fullResult}`);
121
123
  }
122
124
  }
123
125
  catch (error) {
@@ -127,11 +129,11 @@ exports.AI_AGENT_JOB_CALL_MCP_TOOL = (0, createNodeDescriptor_1.createNodeDescri
127
129
  message: error.message,
128
130
  }
129
131
  : error;
130
- (_h = api.logDebugError) === null || _h === void 0 ? void 0 : _h.call(api, `Failed to execute MCP Tool ${(_j = toolCall === null || toolCall === void 0 ? void 0 : toolCall.function) === null || _j === void 0 ? void 0 : _j.name}:<br>${JSON.stringify(errorDetails, null, 2)}`);
132
+ (_j = api.logDebugError) === null || _j === void 0 ? void 0 : _j.call(api, `Failed to execute MCP Tool ${(_k = toolCall === null || toolCall === void 0 ? void 0 : toolCall.function) === null || _k === void 0 ? void 0 : _k.name}:<br>${JSON.stringify(errorDetails, null, 2)}`);
131
133
  }
132
134
  // Add result to Cognigy Input/Context for further usage
133
135
  if (storeLocation === "context") {
134
- (_k = api.addToContext) === null || _k === void 0 ? void 0 : _k.call(api, contextKey, toolResult, "simple");
136
+ (_l = api.addToContext) === null || _l === void 0 ? void 0 : _l.call(api, contextKey, toolResult, "simple");
135
137
  }
136
138
  else if (storeLocation === "input") {
137
139
  api.addToInput(inputKey, toolResult);
@@ -163,6 +163,13 @@ exports.AI_AGENT_JOB_MCP_TOOL = (0, createNodeDescriptor_1.createNodeDescriptor)
163
163
  value: "blacklist",
164
164
  },
165
165
  },
166
+ {
167
+ key: "mcpHeaders",
168
+ type: "keyValuePairs",
169
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__HEADERS__LABEL",
170
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__HEADERS__DESCRIPTION",
171
+ defaultValue: "{}",
172
+ },
166
173
  ],
167
174
  sections: [
168
175
  {
@@ -175,7 +182,7 @@ exports.AI_AGENT_JOB_MCP_TOOL = (0, createNodeDescriptor_1.createNodeDescriptor)
175
182
  key: "advanced",
176
183
  label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__ADVANCED__LABEL",
177
184
  defaultCollapsed: true,
178
- fields: ["cacheTools", "condition", "toolFilter", "whitelist", "blacklist"],
185
+ fields: ["cacheTools", "condition", "toolFilter", "whitelist", "blacklist", "mcpHeaders"],
179
186
  },
180
187
  ],
181
188
  form: [
@@ -74,12 +74,14 @@ const createToolDefinitions = async (childConfigs, api, useStrict) => {
74
74
  const cacheTools = child.config.cacheTools;
75
75
  const sendDebug = child.config.debugMessageFetchedTools;
76
76
  const toolFilter = child.config.toolFilter;
77
+ const mcpHeaders = child.config.mcpHeaders;
77
78
  let mcpTools = null;
78
79
  try {
79
80
  mcpTools = await api.fetchMcpTools({
80
81
  mcpServerUrl,
81
82
  timeout,
82
83
  cacheTools,
84
+ mcpHeaders,
83
85
  });
84
86
  }
85
87
  catch (error) {
@@ -408,6 +408,57 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
408
408
  description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__INCLUDE_ALL_OUTPUT_TYPES__DESCRIPTION",
409
409
  defaultValue: true,
410
410
  },
411
+ {
412
+ key: "advancedLogging",
413
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__ADVANCED_LOGGING__LABEL",
414
+ type: "toggle",
415
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__ADVANCED_LOGGING__DESCRIPTION",
416
+ defaultValue: false,
417
+ },
418
+ {
419
+ key: "loggingWebhookUrl",
420
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOGGING_WEBHOOK_URL__LABEL",
421
+ type: "cognigyText",
422
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOGGING_WEBHOOK_URL__DESCRIPTION",
423
+ defaultValue: "",
424
+ condition: {
425
+ key: "advancedLogging",
426
+ value: true
427
+ }
428
+ },
429
+ {
430
+ key: "loggingCustomData",
431
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__CUSTOM_LOGGING_DATA__LABEL",
432
+ type: "cognigyText",
433
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__CUSTOM_LOGGING_DATA__DESCRIPTION",
434
+ defaultValue: "",
435
+ condition: {
436
+ key: "advancedLogging",
437
+ value: true
438
+ }
439
+ },
440
+ {
441
+ key: "loggingHeaders",
442
+ type: "keyValuePairs",
443
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOGGING_HEADERS__LABEL",
444
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOGGING_HEADERS__DESCRIPTION",
445
+ defaultValue: "{}",
446
+ condition: {
447
+ key: "advancedLogging",
448
+ value: true
449
+ }
450
+ },
451
+ {
452
+ key: "conditionForLogging",
453
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__CONDITION_FOR_LOGGING__LABEL",
454
+ type: "cognigyText",
455
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__CONDITION_FOR_LOGGING__DESCRIPTION",
456
+ defaultValue: "",
457
+ condition: {
458
+ key: "advancedLogging",
459
+ value: true
460
+ }
461
+ },
411
462
  {
412
463
  key: "customModelOptions",
413
464
  label: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__FIELDS__CUSTOM_MODEL_OPTIONS__LABEL",
@@ -608,7 +659,12 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
608
659
  "debugLogTokenCount",
609
660
  "debugLogRequestAndCompletion",
610
661
  "debugLogLLMLatency",
611
- "debugLogToolDefinitions"
662
+ "debugLogToolDefinitions",
663
+ "advancedLogging",
664
+ "loggingWebhookUrl",
665
+ "loggingCustomData",
666
+ "conditionForLogging",
667
+ "loggingHeaders",
612
668
  ]
613
669
  },
614
670
  {
@@ -646,10 +702,10 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
646
702
  },
647
703
  tags: ["ai", "llm", "gpt", "generative ai", "openai", "azure", "prompt"],
648
704
  function: async ({ cognigy, config, childConfigs, nodeId }) => {
649
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
705
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y;
650
706
  const { api, input, flowReferenceId } = cognigy;
651
707
  const { temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, streamStopTokens, streamStopTokenOverrides, debugLogTokenCount, debugLogRequestAndCompletion, debugLogLLMLatency, debugLogToolDefinitions, llmProviderReferenceId, usePromptMode, chatTranscriptSteps, responseFormat, streamStoreCopyInInput, seed, immediateOutput, customModelOptions, customRequestOptions, errorHandling = "continue", // default behavior for LLM Prompt node was, continue its execution even though an error occurred (deviating it from the SEO node) & do not output an error message on UI explicitly. However, error is always stored in the input or context object. We can use an extra "say" node to output it.
652
- errorHandlingGotoTarget, errorMessage, useTextAlternativeForLLM, logErrorToSystem, processImages, transcriptImageHandling, toolChoice, useStrict } = config;
708
+ errorHandlingGotoTarget, errorMessage, useTextAlternativeForLLM, advancedLogging, loggingWebhookUrl, loggingCustomData, loggingHeaders, conditionForLogging, logErrorToSystem, processImages, transcriptImageHandling, toolChoice, useStrict } = config;
653
709
  let prompt = config.prompt || "";
654
710
  const { traceId } = input;
655
711
  // check if custom variables are used and if they have a length modifier
@@ -743,10 +799,11 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
743
799
  * Retrieve the tool definitions from the child nodes
744
800
  */
745
801
  const { toolIds, toolNames, toolMap, tools } = await (0, createToolDefinitions_1.createToolDefinitions)(childConfigs, api, useStrict);
802
+ const enableAdvancedLogging = advancedLogging && loggingWebhookUrl && (conditionForLogging === "" || !!conditionForLogging);
746
803
  /**
747
804
  * Generate Prompt Options
748
805
  */
749
- const llmPromptOptions = Object.assign(Object.assign(Object.assign({ prompt,
806
+ const llmPromptOptions = Object.assign(Object.assign(Object.assign(Object.assign({ prompt,
750
807
  temperature,
751
808
  maxTokens,
752
809
  topP,
@@ -769,7 +826,9 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
769
826
  }
770
827
  }, streamStopTokens: streamStopTokens || [".", "!", "?", "\\n"], streamStopTokenOverrides, preventNewLineRemoval: isStreamingChannel ? true : false,
771
828
  // set to true in order to get token usage
772
- detailedResults: true, seed: Number(seed) ? Number(seed) : undefined }, (tools.length > 0 && { tools })), (tools.length > 0 && { toolChoice: toolChoice })), { customModelOptions,
829
+ detailedResults: true, seed: Number(seed) ? Number(seed) : undefined }, (tools.length > 0 && { tools })), (tools.length > 0 && { toolChoice: toolChoice })), (enableAdvancedLogging && {
830
+ logging: Object.assign(Object.assign({ webhookUrl: loggingWebhookUrl }, (loggingCustomData && { customData: loggingCustomData })), (loggingHeaders && { headers: loggingHeaders }))
831
+ })), { customModelOptions,
773
832
  customRequestOptions });
774
833
  if (useStop) {
775
834
  llmPromptOptions["stop"] = stop;
@@ -861,19 +920,20 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
861
920
  node: nodeId,
862
921
  } }, (isMcpToolCall && {
863
922
  mcpServerUrl: (_h = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _h === void 0 ? void 0 : _h.mcpServerUrl,
864
- timeout: (_j = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _j === void 0 ? void 0 : _j.timeout,
923
+ mcpHeaders: (_j = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _j === void 0 ? void 0 : _j.mcpHeaders,
924
+ timeout: (_k = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _k === void 0 ? void 0 : _k.timeout,
865
925
  mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
866
926
  })), { toolCall: mainToolCall }),
867
927
  });
868
928
  // if there are any parameters/arguments, add them to the input slots
869
929
  if (mainToolCall.function.arguments) {
870
- input.llmPrompt = Object.assign(Object.assign({}, input.llmPrompt), { toolArgs: Object.assign(Object.assign({}, (_l = (_k = input.llmPrompt) === null || _k === void 0 ? void 0 : _k.toolArgs) !== null && _l !== void 0 ? _l : {}), mainToolCall.function.arguments) });
930
+ input.llmPrompt = Object.assign(Object.assign({}, input.llmPrompt), { toolArgs: Object.assign(Object.assign({}, (_m = (_l = input.llmPrompt) === null || _l === void 0 ? void 0 : _l.toolArgs) !== null && _m !== void 0 ? _m : {}), mainToolCall.function.arguments) });
871
931
  }
872
932
  // Debug Message for Tool Calls, configured in the Tool Node
873
- if ((_m = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _m === void 0 ? void 0 : _m.debugMessage) {
933
+ if ((_o = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _o === void 0 ? void 0 : _o.debugMessage) {
874
934
  const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${await api.parseCognigyScriptText(toolChild.config.toolId)}`];
875
935
  // Arguments / Parameters Slots
876
- const slots = ((_o = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _o === void 0 ? void 0 : _o.arguments) && Object.keys(mainToolCall.function.arguments);
936
+ const slots = ((_p = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _p === void 0 ? void 0 : _p.arguments) && Object.keys(mainToolCall.function.arguments);
877
937
  const hasSlots = slots && slots.length > 0;
878
938
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
879
939
  if (hasSlots) {
@@ -888,7 +948,7 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
888
948
  messageLines.push(`- ${slot}: ${slotValueAsString}`);
889
949
  });
890
950
  }
891
- (_p = api.logDebugMessage) === null || _p === void 0 ? void 0 : _p.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
951
+ (_q = api.logDebugMessage) === null || _q === void 0 ? void 0 : _q.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
892
952
  }
893
953
  if (toolChild) {
894
954
  api.setNextNode(toolChild.id);
@@ -913,11 +973,11 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
913
973
  // we stringify objects (e.g. results coming from JSON Mode)
914
974
  // so that the transcript only contains text
915
975
  const resultToOutput = typeof ((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult) === "object" ? JSON.stringify((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult, undefined, 2) : (llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult;
916
- await ((_q = api.output) === null || _q === void 0 ? void 0 : _q.call(api, resultToOutput, {}));
976
+ await ((_r = api.output) === null || _r === void 0 ? void 0 : _r.call(api, resultToOutput, {}));
917
977
  }
918
978
  else if (llmResult.finishReason && llmPromptOptions.stream) {
919
979
  // send the finishReason as last output for a stream
920
- (_r = api.output) === null || _r === void 0 ? void 0 : _r.call(api, "", {
980
+ (_s = api.output) === null || _s === void 0 ? void 0 : _s.call(api, "", {
921
981
  _cognigy: {
922
982
  _preventTranscript: true,
923
983
  _messageId,
@@ -940,7 +1000,7 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
940
1000
  }
941
1001
  // Add response to Cognigy Input/Context for further usage
942
1002
  if (storeLocation === "context") {
943
- (_s = api.addToContext) === null || _s === void 0 ? void 0 : _s.call(api, contextKey, llmResult, "simple");
1003
+ (_t = api.addToContext) === null || _t === void 0 ? void 0 : _t.call(api, contextKey, llmResult, "simple");
944
1004
  }
945
1005
  else if (storeLocation === "input") {
946
1006
  api.addToInput(inputKey, llmResult);
@@ -953,19 +1013,19 @@ exports.LLM_PROMPT_V2 = (0, createNodeDescriptor_1.createNodeDescriptor)({
953
1013
  const errorDetailsBase = {
954
1014
  name: error === null || error === void 0 ? void 0 : error.name,
955
1015
  code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
956
- message: (error === null || error === void 0 ? void 0 : error.message) || ((_t = error.originalErrorDetails) === null || _t === void 0 ? void 0 : _t.message),
1016
+ message: (error === null || error === void 0 ? void 0 : error.message) || ((_u = error.originalErrorDetails) === null || _u === void 0 ? void 0 : _u.message),
957
1017
  };
958
1018
  const errorDetails = Object.assign(Object.assign({}, errorDetailsBase), { originalErrorDetails: error === null || error === void 0 ? void 0 : error.originalErrorDetails });
959
1019
  // return the requestId if it exist in the error obj.
960
- if ((_u = error.meta) === null || _u === void 0 ? void 0 : _u.requestId) {
1020
+ if ((_v = error.meta) === null || _v === void 0 ? void 0 : _v.requestId) {
961
1021
  errorDetails["meta"] = {
962
- requestId: (_v = error.meta) === null || _v === void 0 ? void 0 : _v.requestId
1022
+ requestId: (_w = error.meta) === null || _w === void 0 ? void 0 : _w.requestId
963
1023
  };
964
1024
  }
965
1025
  if (logErrorToSystem) {
966
- (_w = api.log) === null || _w === void 0 ? void 0 : _w.call(api, "error", JSON.stringify(errorDetailsBase));
1026
+ (_x = api.log) === null || _x === void 0 ? void 0 : _x.call(api, "error", JSON.stringify(errorDetailsBase));
967
1027
  }
968
- (_x = api.logDebugError) === null || _x === void 0 ? void 0 : _x.call(api, errorDetailsBase, "UI__DEBUG_MODE__LLM_PROMPT__ERROR");
1028
+ (_y = api.logDebugError) === null || _y === void 0 ? void 0 : _y.call(api, errorDetailsBase, "UI__DEBUG_MODE__LLM_PROMPT__ERROR");
969
1029
  await handleServiceError(errorDetails);
970
1030
  return;
971
1031
  }
@@ -163,6 +163,13 @@ exports.LLM_PROMPT_MCP_TOOL = (0, createNodeDescriptor_1.createNodeDescriptor)({
163
163
  value: "blacklist",
164
164
  },
165
165
  },
166
+ {
167
+ key: "mcpHeaders",
168
+ type: "keyValuePairs",
169
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__HEADERS__LABEL",
170
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__HEADERS__DESCRIPTION",
171
+ defaultValue: "{}",
172
+ },
166
173
  ],
167
174
  sections: [
168
175
  {
@@ -175,7 +182,7 @@ exports.LLM_PROMPT_MCP_TOOL = (0, createNodeDescriptor_1.createNodeDescriptor)({
175
182
  key: "advanced",
176
183
  label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__ADVANCED__LABEL",
177
184
  defaultCollapsed: true,
178
- fields: ["cacheTools", "condition", "toolFilter", "whitelist", "blacklist"],
185
+ fields: ["cacheTools", "condition", "toolFilter", "whitelist", "blacklist", "mcpHeaders"],
179
186
  },
180
187
  ],
181
188
  form: [