@cognigy/rest-api-client 2025.19.0 → 2025.20.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. package/CHANGELOG.md +5 -0
  2. package/build/apigroups/InsightsAPIGroup_2_0.js +10 -10
  3. package/build/apigroups/MetricsAPIGroup_2_0.js +4 -0
  4. package/build/shared/charts/descriptors/nlu/fuzzySearch.js +6 -6
  5. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +81 -21
  6. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobCallMCPTool.js +7 -5
  7. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js +8 -1
  8. package/build/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +2 -0
  9. package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +78 -18
  10. package/build/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +8 -1
  11. package/build/shared/interfaces/IOrganisation.js +1 -0
  12. package/build/shared/interfaces/resources/IAuditEvent.js +2 -1
  13. package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeSource.js +1 -1
  14. package/build/shared/interfaces/restAPI/metrics/logs/v2.0/ITailLogEntriesRest_2_0.js +3 -0
  15. package/dist/esm/apigroups/InsightsAPIGroup_2_0.js +10 -10
  16. package/dist/esm/apigroups/MetricsAPIGroup_2_0.js +4 -0
  17. package/dist/esm/shared/charts/descriptors/nlu/fuzzySearch.js +6 -6
  18. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +81 -21
  19. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobCallMCPTool.js +7 -5
  20. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js +8 -1
  21. package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +2 -0
  22. package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +85 -25
  23. package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +8 -1
  24. package/dist/esm/shared/interfaces/IOrganisation.js +1 -0
  25. package/dist/esm/shared/interfaces/resources/IAuditEvent.js +2 -1
  26. package/dist/esm/shared/interfaces/resources/knowledgeStore/IKnowledgeSource.js +1 -1
  27. package/dist/esm/shared/interfaces/restAPI/metrics/logs/v2.0/ITailLogEntriesRest_2_0.js +2 -0
  28. package/package.json +1 -1
  29. package/types/index.d.ts +48 -2
package/build/shared/interfaces/IOrganisation.js
@@ -59,6 +59,7 @@ exports.organisationDataSchema = {
  stepEventsTTLInMinutes: { type: "integer", minimum: 0 },
  disabled: { type: "boolean" },
  name: { type: "string", minLength: 1, format: "resource-name" },
+ businessUnitId: { type: "string" },
  passwordPolicy: exports.organisationPasswordPolicySchema,
  quotaMaxChannelsPerProject: { type: "integer", minimum: 0 },
  quotaMaxMessagesPerDay: { type: "integer", minimum: 0 },
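The only schema change in IOrganisation is the new `businessUnitId` string property. A minimal TypeScript sketch of an organisation data object using it; every value below is a hypothetical placeholder:

    const organisationData = {
        name: "acme-support",        // "resource-name" format, minLength 1
        disabled: false,
        businessUnitId: "bu-12345",  // new in 2025.20.0; hypothetical ID value
        quotaMaxChannelsPerProject: 10,
        quotaMaxMessagesPerDay: 100000,
    };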
package/build/shared/interfaces/resources/IAuditEvent.js
@@ -110,7 +110,8 @@ exports.actionTypes = [
  "processKnowledgeSourceFile",
  "setupObservationConfig",
  "updateObservationConfig",
- "resolveAiOpsCenterError"
+ "resolveAiOpsCenterError",
+ "odataRequest",
  ];
  exports.auditEventSchema = {
  title: "auditEventSchema",
package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeSource.js
@@ -4,7 +4,7 @@ exports.knowledgeSourceSchema = exports.knowledgeSourceDataSchema = exports.know
  /* Interfaces & Types */
  const IEntityMeta_1 = require("../IEntityMeta");
  exports.knowledgeSourceStatus = ["ready", "ingesting", "disabled", "failure"];
- exports.knowledgeSourceType = ["pdf", "txt", "docx", "pptx", "ctxt", "url", "manual", "jpeg", "jpg", "png", "bmp", "heif", "tiff"];
+ exports.knowledgeSourceType = ["pdf", "txt", "docx", "pptx", "ctxt", "url", "manual", "jpeg", "jpg", "png", "bmp", "heif", "tiff", "extension"];
  exports.knowledgeSourceDataSchema = {
  title: "knowledgeSourceDataSchema",
  type: "object",
package/build/shared/interfaces/restAPI/metrics/logs/v2.0/ITailLogEntriesRest_2_0.js
@@ -0,0 +1,3 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ //# sourceMappingURL=ITailLogEntriesRest_2_0.js.map
package/dist/esm/apigroups/InsightsAPIGroup_2_0.js
@@ -4,12 +4,12 @@ import { stringifyQuery } from "../shared/helper/rest";
  export function InsightsAPIGroup_2_0(instance) {
  const self = instance;
  return {
- indexSteps: (args, options) => GenericAPIFn(`/new/insights/beta/steps?${stringifyQuery(args)}`, "GET", self)(undefined, options),
- generateReport: (args, options) => GenericAPIFn("/new/insights/beta/reports/generate", "POST", self)(args, options),
- loadReportByQueryHash: ({ queryHash }, options) => GenericAPIFn(`/new/insights/beta/reports/${queryHash}`, "GET", self)(undefined, options),
+ indexSteps: (args, options) => GenericAPIFn(`/v1.0/insights/steps?${stringifyQuery(args)}`, "GET", self)(undefined, options),
+ generateReport: (args, options) => GenericAPIFn("/v1.0/insights/reports/generate", "POST", self)(args, options),
+ loadReportByQueryHash: ({ queryHash }, options) => GenericAPIFn(`/v1.0/insights/reports/${queryHash}`, "GET", self)(undefined, options),
  generateMessagesReport: (_a, options) => {
  var { search, skip, limit, next, previous, sort, messageType } = _a, args = __rest(_a, ["search", "skip", "limit", "next", "previous", "sort", "messageType"]);
- return GenericAPIFn(`/new/insights/beta/messages/report/generate?${stringifyQuery({
+ return GenericAPIFn(`/v1.0/insights/messages/report/generate?${stringifyQuery({
  search,
  skip,
  limit,
@@ -19,12 +19,12 @@ export function InsightsAPIGroup_2_0(instance) {
  messageType
  })}`, "POST", self)(args, options);
  },
- loadMessagesReportByQueryHash: ({ queryHash }, options) => GenericAPIFn(`/new/insights/beta/messages/report/${queryHash}`, "GET", self)(undefined, options),
- generateStepReport: (args, options) => GenericAPIFn("/new/insights/beta/steps/report/generate", "POST", self)(args, options),
- loadStepReportByQueryHash: ({ queryHash }, options) => GenericAPIFn(`/new/insights/beta/steps/report/${queryHash}`, "GET", self)(undefined, options),
+ loadMessagesReportByQueryHash: ({ queryHash }, options) => GenericAPIFn(`/v1.0/insights/messages/report/${queryHash}`, "GET", self)(undefined, options),
+ generateStepReport: (args, options) => GenericAPIFn("/v1.0/insights/steps/report/generate", "POST", self)(args, options),
+ loadStepReportByQueryHash: ({ queryHash }, options) => GenericAPIFn(`/v1.0/insights/steps/report/${queryHash}`, "GET", self)(undefined, options),
  generateTranscriptsReport: (_a, options) => {
  var { search, skip, limit, next, previous, sort } = _a, args = __rest(_a, ["search", "skip", "limit", "next", "previous", "sort"]);
- return GenericAPIFn(`/new/insights/beta/transcripts/report/generate?${stringifyQuery({
+ return GenericAPIFn(`/v1.0/insights/transcripts/report/generate?${stringifyQuery({
  search,
  skip,
  limit,
@@ -33,12 +33,12 @@ export function InsightsAPIGroup_2_0(instance) {
  sort
  })}`, "POST", self)(args, options);
  },
- loadTranscriptsReportByQueryHash: ({ queryHash }, options) => GenericAPIFn(`/new/insights/beta/transcripts/report/${queryHash}`, "GET", self)(undefined, options),
+ loadTranscriptsReportByQueryHash: ({ queryHash }, options) => GenericAPIFn(`/v1.0/insights/transcripts/report/${queryHash}`, "GET", self)(undefined, options),
  deleteAnalyticsRecords: (_a, options) => {
  var { projectId } = _a, restArgs = __rest(_a, ["projectId"]);
  return GenericAPIFn(`/v2.0/analytics/${projectId}`, "DELETE", self)(restArgs, options);
  },
- insightsJWT: (options) => GenericAPIFn("/new/insights/auth", "POST", self)(undefined, options)
+ insightsJWT: (options) => GenericAPIFn("/insights/auth", "POST", self)(undefined, options)
  };
  }
  //# sourceMappingURL=InsightsAPIGroup_2_0.js.map
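Every Insights route moves from the `/new/insights/beta` prefix to versioned `/v1.0/insights` paths, and `insightsJWT` drops its `/new` prefix; the method signatures are unchanged, so only the URLs the client issues differ. A minimal usage sketch, assuming `insights` is the group returned by `InsightsAPIGroup_2_0` for an already-configured client instance (the construction and all argument values are hypothetical):

    // Now issues GET /v1.0/insights/steps?... instead of /new/insights/beta/steps?...
    const steps = await insights.indexSteps({ limit: 25 });

    // Now POSTs to /v1.0/insights/reports/generate
    const report = await insights.generateReport({ projectId: "project-1" });

    // Now POSTs to /insights/auth (previously /new/insights/auth)
    const jwt = await insights.insightsJWT();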
package/dist/esm/apigroups/MetricsAPIGroup_2_0.js
@@ -12,6 +12,10 @@ export function MetricsAPIGroup_2_0(instance) {
  var { projectId } = _a, args = __rest(_a, ["projectId"]);
  return GenericAPIFn(`/new/v2.0/projects/${projectId}/logs?${stringifyQuery(args)}`, "GET", self)(undefined, options);
  },
+ tailLogEntries: (_a, options) => {
+ var { projectId } = _a, args = __rest(_a, ["projectId"]);
+ return GenericAPIFn(`/new/v2.0/projects/${projectId}/logs/tail?${stringifyQuery(args)}`, "GET", self)(undefined, options);
+ },
  readLogEntry: ({ projectId, logEntryId }, options) => GenericAPIFn(`/new/v2.0/projects/${projectId}/logs/${logEntryId}}`, "GET", self)(undefined, options),
  indexProfiles: (args, options) => GenericAPIFn(`/new/v2.0/profiles?${stringifyQuery(args)}`, "GET", self)(undefined, options),
  createProfile: (args, options) => GenericAPIFn("/new/v2.0/profiles", "POST", self)(args, options),
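The new `tailLogEntries` method mirrors `indexLogEntries` but targets the `/logs/tail` endpoint: `projectId` is split out for the path and the remaining args are serialized into the query string. A sketch under the same assumptions as above (hypothetical group instance and query args):

    // GET /new/v2.0/projects/project-1/logs/tail?limit=100
    const tail = await metrics.tailLogEntries({ projectId: "project-1", limit: 100 });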
package/dist/esm/shared/charts/descriptors/nlu/fuzzySearch.js
@@ -31,12 +31,12 @@ export const FUZZY_SEARCH = createNodeDescriptor({
  params: {
  required: true
  },
- defaultValue: `[
- "apple",
- "orange",
- "banana",
- "pear"
- ]`
+ defaultValue: `{
+ "$cs":{
+ "script":"context.names",
+ "type":"array"
+ }
+ }`
  },
  {
  key: "isCaseSensitive",
package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js
@@ -415,6 +415,57 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__INCLUDE_ALL_OUTPUT_TYPES__DESCRIPTION",
  defaultValue: true,
  },
+ {
+ key: "advancedLogging",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__ADVANCED_LOGGING__LABEL",
+ type: "toggle",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__ADVANCED_LOGGING__DESCRIPTION",
+ defaultValue: false,
+ },
+ {
+ key: "loggingWebhookUrl",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOGGING_WEBHOOK_URL__LABEL",
+ type: "cognigyText",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOGGING_WEBHOOK_URL__DESCRIPTION",
+ defaultValue: "",
+ condition: {
+ key: "advancedLogging",
+ value: true
+ }
+ },
+ {
+ key: "loggingCustomData",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__CUSTOM_LOGGING_DATA__LABEL",
+ type: "cognigyText",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__CUSTOM_LOGGING_DATA__DESCRIPTION",
+ defaultValue: "",
+ condition: {
+ key: "advancedLogging",
+ value: true
+ }
+ },
+ {
+ key: "loggingHeaders",
+ type: "keyValuePairs",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOGGING_HEADERS__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOGGING_HEADERS__DESCRIPTION",
+ defaultValue: "{}",
+ condition: {
+ key: "advancedLogging",
+ value: true
+ }
+ },
+ {
+ key: "conditionForLogging",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__CONDITION_FOR_LOGGING__LABEL",
+ type: "cognigyText",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__CONDITION_FOR_LOGGING__DESCRIPTION",
+ defaultValue: "",
+ condition: {
+ key: "advancedLogging",
+ value: true
+ }
+ },
  {
  key: "logErrorToSystem",
  label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_TO_SYSTEM__LABEL",
@@ -810,7 +861,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  "timeoutInMs",
  "maxTokens",
  "temperature",
- "useTextAlternativeForLLM",
+ "useTextAlternativeForLLM"
  ],
  },
  {
@@ -835,7 +886,12 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  "debugLogTokenCount",
  "debugLogSystemPrompt",
  "debugLogToolDefinitions",
- "debugLogLLMLatency"
+ "debugLogLLMLatency",
+ "advancedLogging",
+ "loggingWebhookUrl",
+ "loggingCustomData",
+ "conditionForLogging",
+ "loggingHeaders",
  ],
  }
  ],
@@ -856,9 +912,9 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  ],
  tags: ["ai", "aiAgent"],
  function: ({ cognigy, config, childConfigs, nodeId }) => __awaiter(void 0, void 0, void 0, function* () {
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24;
  const { api, context, input, profile, flowReferenceId } = cognigy;
- const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, useTextAlternativeForLLM, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, debugLogLLMLatency, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
+ const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, useTextAlternativeForLLM, advancedLogging, loggingWebhookUrl, loggingCustomData, conditionForLogging, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, debugLogLLMLatency, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, loggingHeaders, sessionParams } = config;
  try {
  if (!aiAgent) {
  throw new Error("Could not resolve AI Agent reference in AI Agent Node");
@@ -906,7 +962,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  throw new Error(`[VG2] Error on AI Agent Job node. Error message: ${error.message}`);
  }
  }
- const _24 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _24, cleanedProfile = __rest(_24, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
+ const _25 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _25, cleanedProfile = __rest(_25, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
  const userMemory = getUserMemory(memoryType, selectedProfileFields, aiAgent, cleanedProfile);
  /**
  * ----- Knowledge Search Section -----
@@ -1151,7 +1207,8 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  }
  const isStreamingChannel = input.channel === "webchat3" || input.channel === "adminconsole";
  const _messageId = randomUUID();
- const llmPromptOptions = Object.assign(Object.assign({ prompt: "", chat: systemMessage,
+ const enableAdvancedLogging = advancedLogging && loggingWebhookUrl && (conditionForLogging === "" || !!conditionForLogging);
+ const llmPromptOptions = Object.assign(Object.assign(Object.assign({ prompt: "", chat: systemMessage,
  // Temp fix to override the transcript if needed
  transcript: ((_0 = context === null || context === void 0 ? void 0 : context._cognigy) === null || _0 === void 0 ? void 0 : _0.transcript) ? [...context._cognigy.transcript] : transcript, detailedResults: true, timeoutInMs: timeoutInMs !== null && timeoutInMs !== void 0 ? timeoutInMs : 8000, maxTokens: maxTokens !== null && maxTokens !== void 0 ? maxTokens : 4000, temperature: temperature !== null && temperature !== void 0 ? temperature : 0.7, topP: 1, frequencyPenalty: 0, presencePenalty: 0, responseFormat: "text", stream: storeLocation === "stream", streamOnDataHandler: (text) => {
  var _a;
@@ -1169,7 +1226,9 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  }
  });
  }
- }, streamStopTokens: streamStopTokens || [".", "!", "?", "\\n"], preventNewLineRemoval: isStreamingChannel ? true : false }, (tools.length > 0 && { tools })), (tools.length > 0 && { toolChoice: toolChoice }));
+ }, streamStopTokens: streamStopTokens || [".", "!", "?", "\\n"], preventNewLineRemoval: isStreamingChannel ? true : false }, (tools.length > 0 && { tools })), (tools.length > 0 && { toolChoice: toolChoice })), (enableAdvancedLogging && {
+ logging: Object.assign(Object.assign({ webhookUrl: loggingWebhookUrl }, (loggingCustomData && { customData: loggingCustomData })), (loggingHeaders && { headers: loggingHeaders }))
+ }));
  // llmProviderReferenceId `default` value is not a responseFormat, rather it is LLM Model default selection.
  if (llmProviderReferenceId && llmProviderReferenceId !== "default") {
  llmPromptOptions["llmProviderReferenceId"] = llmProviderReferenceId;
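De-transpiled, the nested `Object.assign` chain above is easier to read; this sketch restates the same logic with spread syntax (the two `tools.length` spreads are condensed into one):

    const enableAdvancedLogging =
        advancedLogging && loggingWebhookUrl && (conditionForLogging === "" || !!conditionForLogging);

    const llmPromptOptions = {
        // ...prompt, chat, transcript, streaming and sampling options as before...
        ...(tools.length > 0 && { tools, toolChoice }),
        ...(enableAdvancedLogging && {
            logging: {
                webhookUrl: loggingWebhookUrl,
                ...(loggingCustomData && { customData: loggingCustomData }),
                ...(loggingHeaders && { headers: loggingHeaders }),
            },
        }),
    };

Note that an empty `conditionForLogging` still enables logging; only a configured condition that evaluates to a falsy value suppresses it.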
@@ -1244,20 +1303,21 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  node: nodeId,
  } }, (isMcpToolCall && {
  mcpServerUrl: (_8 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _8 === void 0 ? void 0 : _8.mcpServerUrl,
- timeout: (_9 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _9 === void 0 ? void 0 : _9.timeout,
+ mcpHeaders: (_9 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _9 === void 0 ? void 0 : _9.mcpHeaders,
+ timeout: (_10 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _10 === void 0 ? void 0 : _10.timeout,
  mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
  })), { toolCall: mainToolCall }),
  });
  // if there are any parameters/arguments, add them to the input slots
  if (mainToolCall.function.arguments) {
- input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_11 = (_10 = input.aiAgent) === null || _10 === void 0 ? void 0 : _10.toolArgs) !== null && _11 !== void 0 ? _11 : {}), mainToolCall.function.arguments) });
+ input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_12 = (_11 = input.aiAgent) === null || _11 === void 0 ? void 0 : _11.toolArgs) !== null && _12 !== void 0 ? _12 : {}), mainToolCall.function.arguments) });
  }
  // Debug Message for Tool Calls, configured in the Tool Node
- if ((_12 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _12 === void 0 ? void 0 : _12.debugMessage) {
+ if ((_13 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _13 === void 0 ? void 0 : _13.debugMessage) {
  const toolId = isMcpToolCall ? mainToolCall.function.name : yield api.parseCognigyScriptText(toolChild.config.toolId);
  const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${toolId}`];
  // Arguments / Parameters Slots
- const slots = ((_13 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _13 === void 0 ? void 0 : _13.arguments) && Object.keys(mainToolCall.function.arguments);
+ const slots = ((_14 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _14 === void 0 ? void 0 : _14.arguments) && Object.keys(mainToolCall.function.arguments);
  const hasSlots = slots && slots.length > 0;
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
  if (hasSlots) {
@@ -1272,7 +1332,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  messageLines.push(`- ${slot}: ${slotValueAsString}`);
  });
  }
- (_14 = api.logDebugMessage) === null || _14 === void 0 ? void 0 : _14.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
+ (_15 = api.logDebugMessage) === null || _15 === void 0 ? void 0 : _15.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
  }
  if (toolChild) {
  api.setNextNode(toolChild.id);
@@ -1297,11 +1357,11 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  }
  // Optionally output the result immediately
  if (llmResult.result && outputImmediately && !llmPromptOptions.stream) {
- yield ((_15 = api.output) === null || _15 === void 0 ? void 0 : _15.call(api, llmResult.result, {}));
+ yield ((_16 = api.output) === null || _16 === void 0 ? void 0 : _16.call(api, llmResult.result, {}));
  }
  else if (llmResult.finishReason && llmPromptOptions.stream) {
  // send the finishReason as last output for a stream
- (_16 = api.output) === null || _16 === void 0 ? void 0 : _16.call(api, "", {
+ (_17 = api.output) === null || _17 === void 0 ? void 0 : _17.call(api, "", {
  _cognigy: {
  _preventTranscript: true,
  _messageId,
@@ -1324,7 +1384,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  }
  // Add response to Cognigy Input/Context for further usage
  if (storeLocation === "context") {
- (_17 = api.addToContext) === null || _17 === void 0 ? void 0 : _17.call(api, contextKey, llmResult, "simple");
+ (_18 = api.addToContext) === null || _18 === void 0 ? void 0 : _18.call(api, contextKey, llmResult, "simple");
  }
  else if (storeLocation === "input") {
  api.addToInput(inputKey, llmResult);
@@ -1337,14 +1397,14 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  const errorDetails = {
  name: (error === null || error === void 0 ? void 0 : error.name) || "Error",
  code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
- message: (error === null || error === void 0 ? void 0 : error.message) || ((_18 = error.originalErrorDetails) === null || _18 === void 0 ? void 0 : _18.message),
+ message: (error === null || error === void 0 ? void 0 : error.message) || ((_19 = error.originalErrorDetails) === null || _19 === void 0 ? void 0 : _19.message),
  };
- (_19 = api.emitEvent) === null || _19 === void 0 ? void 0 : _19.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
+ (_20 = api.emitEvent) === null || _20 === void 0 ? void 0 : _20.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
  if (logErrorToSystem) {
- (_20 = api.log) === null || _20 === void 0 ? void 0 : _20.call(api, "error", JSON.stringify(errorDetails));
+ (_21 = api.log) === null || _21 === void 0 ? void 0 : _21.call(api, "error", JSON.stringify(errorDetails));
  }
  if (errorHandling !== "stop") {
- (_21 = api.logDebugError) === null || _21 === void 0 ? void 0 : _21.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
+ (_22 = api.logDebugError) === null || _22 === void 0 ? void 0 : _22.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
  }
  if (storeErrorInInput) {
  input.aiAgent = input.aiAgent || {};
@@ -1353,7 +1413,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  if (errorHandling === "continue") {
  // output the timeout message
  if (errorMessage) {
- yield ((_22 = api.output) === null || _22 === void 0 ? void 0 : _22.call(api, errorMessage, null));
+ yield ((_23 = api.output) === null || _23 === void 0 ? void 0 : _23.call(api, errorMessage, null));
  }
  // Set default node as next node
  const defaultChild = childConfigs.find(child => child.type === "aiAgentJobDefault");
@@ -1365,7 +1425,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
  if (!errorHandlingGotoTarget) {
  throw new Error("GoTo Target is required");
  }
- if (!((_23 = api.checkThink) === null || _23 === void 0 ? void 0 : _23.call(api, nodeId))) {
+ if (!((_24 = api.checkThink) === null || _24 === void 0 ? void 0 : _24.call(api, nodeId))) {
  api.resetNextNodes();
  yield api.executeFlow({
  flowNode: {
package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobCallMCPTool.js
@@ -92,7 +92,7 @@ export const AI_AGENT_JOB_CALL_MCP_TOOL = createNodeDescriptor({
  },
  tags: ["ai", "aiAgent"],
  function: ({ cognigy, config }) => __awaiter(void 0, void 0, void 0, function* () {
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l;
  const { api } = cognigy;
  const { storeLocation, contextKey, inputKey, resolveImmediately, debugToolResult } = config;
  const sessionState = yield api.loadSessionState();
@@ -100,8 +100,9 @@ export const AI_AGENT_JOB_CALL_MCP_TOOL = createNodeDescriptor({
  const aiAgentJobNode = (_b = sessionState.lastToolCall) === null || _b === void 0 ? void 0 : _b.aiAgentJobNode;
  const mcpServerUrl = (_c = sessionState.lastToolCall) === null || _c === void 0 ? void 0 : _c.mcpServerUrl;
  const timeout = (_d = sessionState.lastToolCall) === null || _d === void 0 ? void 0 : _d.timeout;
+ const mcpHeaders = (_e = sessionState.lastToolCall) === null || _e === void 0 ? void 0 : _e.mcpHeaders;
  if (!(toolCall === null || toolCall === void 0 ? void 0 : toolCall.id)) {
- (_e = api.logDebugError) === null || _e === void 0 ? void 0 : _e.call(api, "UI__DEBUG_MODE__AI_AGENT_ANSWER__ERROR__MESSAGE");
+ (_f = api.logDebugError) === null || _f === void 0 ? void 0 : _f.call(api, "UI__DEBUG_MODE__AI_AGENT_ANSWER__ERROR__MESSAGE");
  }
  if (toolCall && aiAgentJobNode && mcpServerUrl && timeout) {
  let toolResult = null;
@@ -112,10 +113,11 @@ export const AI_AGENT_JOB_CALL_MCP_TOOL = createNodeDescriptor({
  toolName: toolCall.function.name,
  toolArgs: toolCall.function.arguments,
  timeout,
+ mcpHeaders: mcpHeaders !== null && mcpHeaders !== void 0 ? mcpHeaders : {},
  });
  fullResult = JSON.stringify(toolResult, null, 2);
  if (debugToolResult) {
- (_f = api.logDebugMessage) === null || _f === void 0 ? void 0 : _f.call(api, `Tool <b>${(_g = toolCall === null || toolCall === void 0 ? void 0 : toolCall.function) === null || _g === void 0 ? void 0 : _g.name}</b> called successfully.<br><br><b>Result:</b><br>${fullResult}`);
+ (_g = api.logDebugMessage) === null || _g === void 0 ? void 0 : _g.call(api, `Tool <b>${(_h = toolCall === null || toolCall === void 0 ? void 0 : toolCall.function) === null || _h === void 0 ? void 0 : _h.name}</b> called successfully.<br><br><b>Result:</b><br>${fullResult}`);
  }
  }
  catch (error) {
@@ -125,11 +127,11 @@ export const AI_AGENT_JOB_CALL_MCP_TOOL = createNodeDescriptor({
  message: error.message,
  }
  : error;
- (_h = api.logDebugError) === null || _h === void 0 ? void 0 : _h.call(api, `Failed to execute MCP Tool ${(_j = toolCall === null || toolCall === void 0 ? void 0 : toolCall.function) === null || _j === void 0 ? void 0 : _j.name}:<br>${JSON.stringify(errorDetails, null, 2)}`);
+ (_j = api.logDebugError) === null || _j === void 0 ? void 0 : _j.call(api, `Failed to execute MCP Tool ${(_k = toolCall === null || toolCall === void 0 ? void 0 : toolCall.function) === null || _k === void 0 ? void 0 : _k.name}:<br>${JSON.stringify(errorDetails, null, 2)}`);
  }
  // Add result to Cognigy Input/Context for further usage
  if (storeLocation === "context") {
- (_k = api.addToContext) === null || _k === void 0 ? void 0 : _k.call(api, contextKey, toolResult, "simple");
+ (_l = api.addToContext) === null || _l === void 0 ? void 0 : _l.call(api, contextKey, toolResult, "simple");
  }
  else if (storeLocation === "input") {
  api.addToInput(inputKey, toolResult);
package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js
@@ -160,6 +160,13 @@ export const AI_AGENT_JOB_MCP_TOOL = createNodeDescriptor({
  value: "blacklist",
  },
  },
+ {
+ key: "mcpHeaders",
+ type: "keyValuePairs",
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__HEADERS__LABEL",
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__HEADERS__DESCRIPTION",
+ defaultValue: "{}",
+ },
  ],
  sections: [
  {
@@ -172,7 +179,7 @@ export const AI_AGENT_JOB_MCP_TOOL = createNodeDescriptor({
  key: "advanced",
  label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__ADVANCED__LABEL",
  defaultCollapsed: true,
- fields: ["cacheTools", "condition", "toolFilter", "whitelist", "blacklist"],
+ fields: ["cacheTools", "condition", "toolFilter", "whitelist", "blacklist", "mcpHeaders"],
  },
  ],
  form: [
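The new `mcpHeaders` key-value field (default `"{}"`), surfaced in the node's Advanced section, lets an MCP Tool node attach custom HTTP headers to requests against its MCP server. A hypothetical value:

    // All header names and values here are placeholders
    const mcpHeaders = {
        Authorization: "Bearer <token>",
        "X-Tenant-Id": "acme",
    };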
package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js
@@ -72,12 +72,14 @@ export const createToolDefinitions = (childConfigs, api, useStrict) => __awaiter
  const cacheTools = child.config.cacheTools;
  const sendDebug = child.config.debugMessageFetchedTools;
  const toolFilter = child.config.toolFilter;
+ const mcpHeaders = child.config.mcpHeaders;
  let mcpTools = null;
  try {
  mcpTools = yield api.fetchMcpTools({
  mcpServerUrl,
  timeout,
  cacheTools,
+ mcpHeaders,
  });
  }
  catch (error) {
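With this pass-through, tool discovery now forwards the node's headers as well. A de-transpiled sketch of the call as it appears in this hunk:

    const mcpTools = await api.fetchMcpTools({
        mcpServerUrl, // MCP server endpoint configured on the tool node
        timeout,
        cacheTools,
        mcpHeaders,   // new in 2025.20.0: custom headers from the node config
    });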