@cognigy/rest-api-client 2026.1.0 → 2026.2.0-rc1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +5 -0
- package/README.md +15 -0
- package/build/apigroups/InsightsAPIGroup_2_1.js +27 -0
- package/build/apigroups/ResourcesAPIGroup_2_0.js +134 -383
- package/build/apigroups/SimulationAPIGroup_2_0.js +24 -23
- package/build/apigroups/aiAgentsV2/agent.js +3 -0
- package/build/apigroups/aiAgentsV2/agentAPI.js +38 -0
- package/build/apigroups/aiAgentsV2/agentPersona.js +3 -0
- package/build/apigroups/aiAgentsV2/agentPersonaAPI.js +38 -0
- package/build/apigroups/aiAgentsV2/tool.js +3 -0
- package/build/apigroups/aiAgentsV2/toolAPI.js +35 -0
- package/build/apigroups/aiAgentsV2/toolDescriptor.js +3 -0
- package/build/apigroups/aiAgentsV2/toolDescriptorAPI.js +13 -0
- package/build/apigroups/index.js +3 -1
- package/build/shared/charts/descriptors/connectionNodes/smtp/index.js +5 -1
- package/build/shared/charts/descriptors/connectionNodes/smtp/oAuth2ClientCredentialsConnection.js +15 -0
- package/build/shared/charts/descriptors/connectionNodes/smtp/oAuth2JwtBearerConnection.js +13 -0
- package/build/shared/charts/descriptors/connectionNodes/smtp/sendEmail.js +54 -10
- package/build/shared/charts/descriptors/connectionNodes/speechProviders/elevenlabsSpeechProviderConnection.js +52 -0
- package/build/shared/charts/descriptors/connectionNodes/speechProviders/index.js +8 -7
- package/build/shared/charts/descriptors/index.js +4 -0
- package/build/shared/charts/descriptors/message/question/question.js +249 -59
- package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +17 -15
- package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobCallMCPTool.js +6 -4
- package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js +57 -1
- package/build/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +7 -0
- package/build/shared/charts/descriptors/service/aiAgentV2.js +89 -0
- package/build/shared/charts/descriptors/service/index.js +5 -1
- package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +15 -13
- package/build/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +57 -1
- package/build/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +67 -13
- package/build/shared/charts/descriptors/voice/mappers/transfer.mapper.js +25 -3
- package/build/shared/charts/descriptors/voice/nodes/sessionSpeechParameters.js +65 -0
- package/build/shared/charts/descriptors/voicegateway2/nodes/play.js +7 -0
- package/build/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +137 -1
- package/build/shared/charts/descriptors/voicegateway2/nodes/transfer.js +135 -2
- package/build/shared/errors/ErrorCode.js +2 -1
- package/build/shared/errors/ErrorCollection.js +1 -0
- package/build/shared/helper/BaseContext.js +1 -1
- package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +1 -0
- package/build/shared/interfaces/handover.js +1 -0
- package/build/shared/interfaces/handoverProviders.js +0 -1
- package/build/shared/interfaces/messageAPI/endpoints.js +3 -0
- package/build/shared/interfaces/resources/IAuditEvent.js +1 -0
- package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeSource.js +1 -1
- package/build/shared/interfaces/resources/settings/IAudioPreviewSettings.js +7 -1
- package/build/shared/interfaces/restAPI/analytics/IDeleteConversationsBySessionRest_2_1.js +3 -0
- package/build/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IRunKnowledgeConnectorRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/simulation/simulationOverview/IGetSimulationOverviewMetricsRestData_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/simulation/simulationOverview/IGetSuccessRateTrendRestData_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/simulation/simulationOverview/IGetUpcomingScheduledRunsRestData_2_0.js +3 -0
- package/build/shared/interfaces/security/ISessionScope.js +3 -0
- package/build/spec/aiAgentV2.spec.js +564 -0
- package/dist/esm/apigroups/InsightsAPIGroup_2_1.js +13 -0
- package/dist/esm/apigroups/ResourcesAPIGroup_2_0.js +134 -383
- package/dist/esm/apigroups/SimulationAPIGroup_2_0.js +24 -23
- package/dist/esm/apigroups/aiAgentsV2/agent.js +2 -0
- package/dist/esm/apigroups/aiAgentsV2/agentAPI.js +24 -0
- package/dist/esm/apigroups/aiAgentsV2/agentPersona.js +2 -0
- package/dist/esm/apigroups/aiAgentsV2/agentPersonaAPI.js +24 -0
- package/dist/esm/apigroups/aiAgentsV2/aiAgentV2API.js +2 -0
- package/dist/esm/apigroups/aiAgentsV2/tool.js +2 -0
- package/dist/esm/apigroups/aiAgentsV2/toolAPI.js +21 -0
- package/dist/esm/apigroups/aiAgentsV2/toolDescriptor.js +2 -0
- package/dist/esm/apigroups/aiAgentsV2/toolDescriptorAPI.js +9 -0
- package/dist/esm/apigroups/index.js +1 -0
- package/dist/esm/shared/charts/descriptors/connectionNodes/smtp/index.js +5 -1
- package/dist/esm/shared/charts/descriptors/connectionNodes/smtp/oAuth2ClientCredentialsConnection.js +12 -0
- package/dist/esm/shared/charts/descriptors/connectionNodes/smtp/oAuth2JwtBearerConnection.js +10 -0
- package/dist/esm/shared/charts/descriptors/connectionNodes/smtp/sendEmail.js +54 -10
- package/dist/esm/shared/charts/descriptors/connectionNodes/speechProviders/elevenlabsSpeechProviderConnection.js +49 -0
- package/dist/esm/shared/charts/descriptors/connectionNodes/speechProviders/index.js +3 -3
- package/dist/esm/shared/charts/descriptors/index.js +5 -1
- package/dist/esm/shared/charts/descriptors/message/question/question.js +249 -59
- package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +17 -15
- package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobCallMCPTool.js +6 -4
- package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js +56 -0
- package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +7 -0
- package/dist/esm/shared/charts/descriptors/service/aiAgentV2.js +87 -0
- package/dist/esm/shared/charts/descriptors/service/index.js +2 -0
- package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +22 -20
- package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +56 -0
- package/dist/esm/shared/charts/descriptors/voice/mappers/setSessionConfig.mapper.js +67 -13
- package/dist/esm/shared/charts/descriptors/voice/mappers/transfer.mapper.js +25 -3
- package/dist/esm/shared/charts/descriptors/voice/nodes/sessionSpeechParameters.js +65 -0
- package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/play.js +7 -0
- package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/setSessionConfig.js +137 -1
- package/dist/esm/shared/charts/descriptors/voicegateway2/nodes/transfer.js +135 -2
- package/dist/esm/shared/errors/ErrorCode.js +2 -1
- package/dist/esm/shared/errors/ErrorCollection.js +1 -0
- package/dist/esm/shared/helper/BaseContext.js +1 -1
- package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +1 -0
- package/dist/esm/shared/interfaces/handover.js +1 -0
- package/dist/esm/shared/interfaces/handoverProviders.js +0 -1
- package/dist/esm/shared/interfaces/messageAPI/endpoints.js +3 -0
- package/dist/esm/shared/interfaces/resources/IAuditEvent.js +1 -0
- package/dist/esm/shared/interfaces/resources/knowledgeStore/IKnowledgeSource.js +1 -1
- package/dist/esm/shared/interfaces/resources/settings/IAudioPreviewSettings.js +7 -1
- package/dist/esm/shared/interfaces/restAPI/analytics/IDeleteConversationsBySessionRest_2_1.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/resources/knowledgeStore/v2.0/connector/IRunKnowledgeConnectorRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/simulation/simulationOverview/IGetSimulationOverviewMetricsRestData_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/simulation/simulationOverview/IGetSuccessRateTrendRestData_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/simulation/simulationOverview/IGetUpcomingScheduledRunsRestData_2_0.js +2 -0
- package/dist/esm/shared/interfaces/security/ISessionScope.js +2 -0
- package/dist/esm/spec/aiAgentV2.spec.js +563 -0
- package/package.json +6 -3
- package/types/index.d.ts +667 -30
package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js

@@ -914,7 +914,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
 ],
 tags: ["ai", "aiAgent"],
 function: ({ cognigy, config, childConfigs, nodeId }) => __awaiter(void 0, void 0, void 0, function* () {
-var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25;
+var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27;
 const { api, context, input, profile, flowReferenceId } = cognigy;
 const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, useTextAlternativeForLLM, advancedLogging, loggingWebhookUrl, loggingCustomData, conditionForLogging, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, debugLogLLMLatency, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, loggingHeaders, sessionParams } = config;
 try {

@@ -964,7 +964,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
 throw new Error(`[VG2] Error on AI Agent Job node. Error message: ${error.message}`);
 }
 }
-const
+const _28 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _28, cleanedProfile = __rest(_28, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
 const userMemory = getUserMemory(memoryType, selectedProfileFields, aiAgent, cleanedProfile);
 /**
  * ----- Knowledge Search Section -----

@@ -1311,18 +1311,20 @@ export const AI_AGENT_JOB = createNodeDescriptor({
 mcpHeaders,
 timeout: (_11 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _11 === void 0 ? void 0 : _11.timeout,
 mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
+authType: (_12 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _12 === void 0 ? void 0 : _12.authType,
+oAuth2Connection: (_13 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _13 === void 0 ? void 0 : _13.oAuth2Connection,
 })), { toolCall: mainToolCall }),
 });
 // if there are any parameters/arguments, add them to the input slots
 if (mainToolCall.function.arguments) {
-input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (
+input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_15 = (_14 = input.aiAgent) === null || _14 === void 0 ? void 0 : _14.toolArgs) !== null && _15 !== void 0 ? _15 : {}), mainToolCall.function.arguments) });
 }
 // Debug Message for Tool Calls, configured in the Tool Node
-if ((
+if ((_16 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _16 === void 0 ? void 0 : _16.debugMessage) {
 const toolId = isMcpToolCall ? mainToolCall.function.name : yield api.parseCognigyScriptText(toolChild.config.toolId);
 const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${toolId}`];
 // Arguments / Parameters Slots
-const slots = ((
+const slots = ((_17 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _17 === void 0 ? void 0 : _17.arguments) && Object.keys(mainToolCall.function.arguments);
 const hasSlots = slots && slots.length > 0;
 messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
 if (hasSlots) {

@@ -1337,7 +1339,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
 messageLines.push(`- ${slot}: ${slotValueAsString}`);
 });
 }
-(
+(_18 = api.logDebugMessage) === null || _18 === void 0 ? void 0 : _18.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
 }
 if (toolChild) {
 api.setNextNode(toolChild.id);

@@ -1362,11 +1364,11 @@ export const AI_AGENT_JOB = createNodeDescriptor({
 }
 // Optionally output the result immediately
 if (llmResult.result && outputImmediately && !llmPromptOptions.stream) {
-yield ((
+yield ((_19 = api.output) === null || _19 === void 0 ? void 0 : _19.call(api, llmResult.result, {}));
 }
 else if (llmResult.finishReason && llmPromptOptions.stream) {
 // send the finishReason as last output for a stream
-(
+(_20 = api.output) === null || _20 === void 0 ? void 0 : _20.call(api, "", {
 _cognigy: {
 _preventTranscript: true,
 _messageId,

@@ -1389,7 +1391,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
 }
 // Add response to Cognigy Input/Context for further usage
 if (storeLocation === "context") {
-(
+(_21 = api.addToContext) === null || _21 === void 0 ? void 0 : _21.call(api, contextKey, llmResult, "simple");
 }
 else if (storeLocation === "input") {
 api.addToInput(inputKey, llmResult);

@@ -1402,14 +1404,14 @@ export const AI_AGENT_JOB = createNodeDescriptor({
 const errorDetails = {
 name: (error === null || error === void 0 ? void 0 : error.name) || "Error",
 code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
-message: (error === null || error === void 0 ? void 0 : error.message) || ((
+message: (error === null || error === void 0 ? void 0 : error.message) || ((_22 = error.originalErrorDetails) === null || _22 === void 0 ? void 0 : _22.message),
 };
-(
+(_23 = api.emitEvent) === null || _23 === void 0 ? void 0 : _23.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
 if (logErrorToSystem) {
-(
+(_24 = api.log) === null || _24 === void 0 ? void 0 : _24.call(api, "error", JSON.stringify(errorDetails));
 }
 if (errorHandling !== "stop") {
-(
+(_25 = api.logDebugError) === null || _25 === void 0 ? void 0 : _25.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
 }
 if (storeErrorInInput) {
 input.aiAgent = input.aiAgent || {};

@@ -1418,7 +1420,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
 if (errorHandling === "continue") {
 // output the timeout message
 if (errorMessage) {
-yield ((
+yield ((_26 = api.output) === null || _26 === void 0 ? void 0 : _26.call(api, errorMessage, null));
 }
 // Set default node as next node
 const defaultChild = childConfigs.find(child => child.type === "aiAgentJobDefault");

@@ -1430,7 +1432,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
 if (!errorHandlingGotoTarget) {
 throw new Error("GoTo Target is required");
 }
-if (!((
+if (!((_27 = api.checkThink) === null || _27 === void 0 ? void 0 : _27.call(api, nodeId))) {
 api.resetNextNodes();
 yield api.executeFlow({
 flowNode: {
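A note on reading the hunks above: the removed lines are truncated by the diff viewer, and the `(_19 = api.output) === null || _19 === void 0 ? void 0 : _19.call(api, ...)` expressions are simply TypeScript's down-levelled optional calls (`api.output?.(...)`). The substantive change is that the AI Agent Job node now forwards the MCP Tool child node's `authType` and `oAuth2Connection` settings when dispatching a tool call. A minimal sketch of that shape, assuming illustrative type and function names that are not part of the package:

```ts
// Assumed source-level form of the compiled change above (illustrative only).
interface MCPOAuth2Connection {
    oAuth2Url: string;
    oAuth2ClientId: string;
    oAuth2ClientSecret: string;
    oAuth2Scope?: string;
}

interface MCPToolChildConfig {
    timeout?: number;
    authType?: "none" | "oAuth2";
    oAuth2Connection?: MCPOAuth2Connection;
}

function buildToolExecutionArgs(
    toolChild: { id: string; config?: MCPToolChildConfig } | undefined,
    mcpHeaders: Record<string, string>,
) {
    return {
        mcpHeaders,
        timeout: toolChild?.config?.timeout,
        mcpToolNode: toolChild?.id,
        // new in 2026.2.0-rc1: auth settings forwarded from the MCP Tool child node
        authType: toolChild?.config?.authType,
        oAuth2Connection: toolChild?.config?.oAuth2Connection,
    };
}
```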
package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobCallMCPTool.js

@@ -93,7 +93,7 @@ export const AI_AGENT_JOB_CALL_MCP_TOOL = createNodeDescriptor({
 },
 tags: ["ai", "aiAgent"],
 function: ({ cognigy, config }) => __awaiter(void 0, void 0, void 0, function* () {
-var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l;
+var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p;
 const { api } = cognigy;
 const { storeLocation, contextKey, inputKey, resolveImmediately, debugToolResult } = config;
 const sessionState = yield api.loadSessionState();

@@ -116,10 +116,12 @@ export const AI_AGENT_JOB_CALL_MCP_TOOL = createNodeDescriptor({
 toolArgs: toolCall.function.arguments,
 timeout,
 mcpHeaders: mcpHeaders !== null && mcpHeaders !== void 0 ? mcpHeaders : {},
+authType: (_h = (_g = sessionState.lastToolCall) === null || _g === void 0 ? void 0 : _g.authType) !== null && _h !== void 0 ? _h : "none",
+oAuth2Connection: (_j = sessionState.lastToolCall) === null || _j === void 0 ? void 0 : _j.oAuth2Connection,
 });
 fullResult = JSON.stringify(toolResult, null, 2);
 if (debugToolResult) {
-(
+(_k = api.logDebugMessage) === null || _k === void 0 ? void 0 : _k.call(api, `Tool <b>${(_l = toolCall === null || toolCall === void 0 ? void 0 : toolCall.function) === null || _l === void 0 ? void 0 : _l.name}</b> called successfully.<br><br><b>Result:</b><br>${fullResult}`);
 }
 }
 catch (error) {

@@ -129,11 +131,11 @@ export const AI_AGENT_JOB_CALL_MCP_TOOL = createNodeDescriptor({
 message: error.message,
 }
 : error;
-(
+(_m = api.logDebugError) === null || _m === void 0 ? void 0 : _m.call(api, `Failed to execute MCP Tool ${(_o = toolCall === null || toolCall === void 0 ? void 0 : toolCall.function) === null || _o === void 0 ? void 0 : _o.name}:<br>${JSON.stringify(errorDetails, null, 2)}`);
 }
 // Add result to Cognigy Input/Context for further usage
 if (storeLocation === "context") {
-(
+(_p = api.addToContext) === null || _p === void 0 ? void 0 : _p.call(api, contextKey, toolResult, "simple");
 }
 else if (storeLocation === "input") {
 api.addToInput(inputKey, toolResult);
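In the Call MCP Tool node, the same auth settings are restored from the session state's `lastToolCall`, with `authType` defaulting to `"none"`. A de-sugared sketch of the options object under assumed shapes (`LastToolCall` and `buildMcpCallOptions` are illustrative names, not package exports):

```ts
// Sketch of the restored auth settings; shapes are inferred from the diff.
interface LastToolCall {
    authType?: "none" | "oAuth2";
    oAuth2Connection?: Record<string, string>;
}

function buildMcpCallOptions(
    sessionState: { lastToolCall?: LastToolCall },
    toolArgs: unknown,
    timeout: number,
    mcpHeaders?: Record<string, string>,
) {
    return {
        toolArgs,
        timeout,
        mcpHeaders: mcpHeaders ?? {},
        // new: auth is re-read from the session's last tool call, defaulting to "none"
        authType: sessionState.lastToolCall?.authType ?? "none",
        oAuth2Connection: sessionState.lastToolCall?.oAuth2Connection,
    };
}
```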
package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobMCPTool.js

@@ -1,5 +1,15 @@
 /* Custom modules */
 import { createNodeDescriptor } from "../../../createNodeDescriptor";
+export const AI_AGENT_MCP_TOOL_CONNECTION_OAUTH2 = {
+    type: "mcp_oauth2",
+    label: "UI__NODE_EDITOR__MCP_OAUTH2_CONNECTION__LABEL",
+    fields: [
+        { fieldName: "oAuth2Url", label: "UI__CONNECTION_EDITOR__FIELD_OAUTH2_URL" },
+        { fieldName: "oAuth2ClientId", label: "UI__CONNECTION_EDITOR__FIELD_CLIENT_ID" },
+        { fieldName: "oAuth2ClientSecret", label: "UI__CONNECTION_EDITOR__FIELD_CLIENT_SECRET" },
+        { fieldName: "oAuth2Scope", label: "UI__CONNECTION_EDITOR__FIELD_SCOPE" }
+    ]
+};
 export const AI_AGENT_JOB_MCP_TOOL = createNodeDescriptor({
     type: "aiAgentJobMCPTool",
     defaultLabel: "MCP Tool",

@@ -167,8 +177,53 @@ export const AI_AGENT_JOB_MCP_TOOL = createNodeDescriptor({
     description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__HEADERS__DESCRIPTION",
     defaultValue: "{}",
     },
+    {
+        key: "oAuth2Connection",
+        type: "connection",
+        label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__OAUTH2_CONNECTION__LABEL",
+        params: {
+            connectionType: AI_AGENT_MCP_TOOL_CONNECTION_OAUTH2.type
+        },
+        condition: {
+            key: "authType",
+            value: "oAuth2"
+        }
+    },
+    {
+        key: "authType",
+        type: "select",
+        label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__AUTH_TYPE__LABEL",
+        description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__AUTH_TYPE__DESCRIPTION",
+        defaultValue: "none",
+        params: {
+            required: true,
+            options: [
+                {
+                    label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__AUTH_TYPE__OPTIONS__NONE__LABEL",
+                    value: "none"
+                },
+                {
+                    label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__AUTH_TYPE__OPTIONS__OAUTH2__LABEL",
+                    value: "oAuth2"
+                }
+            ]
+        },
+        resetOption: {
+            lookupValue: "none",
+            fieldsToReset: ["oAuth2Connection"]
+        }
+    },
     ],
     sections: [
+    {
+        key: "auth",
+        label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__SECTIONS__AUTHENTICATION__LABEL",
+        defaultCollapsed: true,
+        fields: [
+            "authType",
+            "oAuth2Connection"
+        ]
+    },
     {
     key: "debugging",
     label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__DEBUG_SETTINGS__LABEL",

@@ -188,6 +243,7 @@ export const AI_AGENT_JOB_MCP_TOOL = createNodeDescriptor({
     { type: "field", key: "mcpWarning" },
     { type: "field", key: "mcpServerUrl" },
     { type: "field", key: "timeout" },
+    { type: "section", key: "auth" },
     { type: "section", key: "debugging" },
     { type: "section", key: "advanced" },
     ],
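The MCP Tool descriptor thus gains an `mcp_oauth2` connection type plus an `authType` select whose `resetOption` clears the connection when auth is switched back to `none`. A hypothetical node config using the new fields might look as follows (field keys are taken from the descriptor above; all values are placeholders):

```ts
// Hypothetical example config for an MCP Tool node with OAuth2 enabled.
const exampleMcpToolNodeConfig = {
    mcpServerUrl: "https://mcp.example.com/sse", // placeholder
    timeout: 10000,                              // placeholder
    mcpHeaders: "{}",
    authType: "oAuth2" as const,                 // "none" (default) or "oAuth2"
    // a connection of type "mcp_oauth2" (AI_AGENT_MCP_TOOL_CONNECTION_OAUTH2.type)
    oAuth2Connection: {
        oAuth2Url: "https://auth.example.com/oauth/token",
        oAuth2ClientId: "my-client-id",
        oAuth2ClientSecret: "•••",
        oAuth2Scope: "mcp.tools",
    },
};
```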
package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js

@@ -54,6 +54,13 @@ export const createToolDefinitions = (childConfigs, api, useStrict) => __awaiter
     timeout,
     cacheTools,
     mcpHeaders,
+    authType: child.config.authType,
+    oAuth2Connection: child.config.authType === "oAuth2" ? {
+        oAuth2Url: child.config.oAuth2Connection.oAuth2Url,
+        oAuth2ClientId: child.config.oAuth2Connection.oAuth2ClientId,
+        oAuth2ClientSecret: child.config.oAuth2Connection.oAuth2ClientSecret,
+        oAuth2Scope: child.config.oAuth2Connection.oAuth2Scope,
+    } : undefined,
     });
     mcpTools = fetched.tools;
     fetchedFromCache = fetched.fromCache;
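In `createToolDefinitions`, the OAuth2 connection fields are copied into the tool-fetch options only when `authType` is `"oAuth2"`. A de-sugared sketch with assumed types (and an extra `undefined` guard that the compiled code above does not have):

```ts
// Illustrative reconstruction of the added logic; not the package's actual source.
interface ToolChildConfig {
    authType?: "none" | "oAuth2";
    oAuth2Connection?: {
        oAuth2Url: string;
        oAuth2ClientId: string;
        oAuth2ClientSecret: string;
        oAuth2Scope?: string;
    };
}

function pickOAuth2Connection(config: ToolChildConfig) {
    // Forward the connection only for OAuth2-authenticated MCP servers.
    return config.authType === "oAuth2" && config.oAuth2Connection
        ? {
              oAuth2Url: config.oAuth2Connection.oAuth2Url,
              oAuth2ClientId: config.oAuth2Connection.oAuth2ClientId,
              oAuth2ClientSecret: config.oAuth2Connection.oAuth2ClientSecret,
              oAuth2Scope: config.oAuth2Connection.oAuth2Scope,
          }
        : undefined;
}
```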
package/dist/esm/shared/charts/descriptors/service/aiAgentV2.js (new file)

@@ -0,0 +1,87 @@
+import { __awaiter } from "tslib";
+/* Custom Modules */
+import { createNodeDescriptor } from "../../createNodeDescriptor";
+/**
+ * This is a prototype!
+ */
+export const AI_AGENT_V2 = createNodeDescriptor({
+    type: "aiAgentV2",
+    defaultLabel: "AI Agent V2",
+    summary: "AI Agent V2",
+    fields: [
+        {
+            key: "agentId",
+            type: "cognigyText",
+            label: " ",
+            params: {
+                required: true,
+            }
+        },
+        {
+            key: "apiKeyAuthKeyConnection",
+            type: "connection",
+            label: "apiKey",
+            params: {
+                connectionType: "http_apiKeyAuthKey"
+            },
+        }
+    ],
+    tags: ["ai"],
+    function: ({ cognigy, config }) => __awaiter(void 0, void 0, void 0, function* () {
+        var _a;
+        const { api, input } = cognigy;
+        const agentId = config.agentId;
+        const apiKey = (_a = config.apiKeyAuthKeyConnection) === null || _a === void 0 ? void 0 : _a.authApiKey;
+        const text = input.text || '';
+        if (!apiKey) {
+            throw new Error("Unable to execute request. No api key provided.");
+        }
+        if (!agentId) {
+            throw new Error("Unable to execute request. No agent ID provided.");
+        }
+        const transcript = yield api.getTranscript({ limit: 50 });
+        // TODO: temp hack, convertTranscript() from ll-providers package
+        const formattedTranscript = transcript
+            // keep only entries that actually have text
+            .filter(entry => (entry === null || entry === void 0 ? void 0 : entry.payload) && typeof entry.payload.text === "string")
+            .map(entry => {
+                // map to OpenAI-style role strings
+                const role = entry.role === "user"
+                    ? "user"
+                    : entry.role === "assistant"
+                        ? "assistant"
+                        : "system";
+                const text = entry.payload.text;
+                return `${role}: ${text}`;
+            })
+            .join("\n");
+        const resp = yield api.sendAiAgentV2Turn({
+            agentReferenceId: agentId,
+            userInput: text,
+            conversationHistory: formattedTranscript,
+            openaiApiKey: apiKey,
+        });
+        if (resp.response && typeof resp.response === 'string') {
+            api.output(resp.response, {
+                _cognigy: {
+                    _preventTranscript: true
+                }
+            });
+            const transcriptContent = {
+                role: "assistant",
+                type: "output",
+                source: "assistant",
+                payload: {
+                    text: resp.response,
+                    data: {},
+                }
+            };
+            yield api.addTranscriptStep(transcriptContent);
+        }
+        if (resp.toolCalls && Array.isArray(resp.toolCalls)) {
+            input.toolCalls = resp.toolCalls;
+        }
+        return;
+    })
+});
+//# sourceMappingURL=aiAgentV2.js.map
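The new `AI_AGENT_V2` node is explicitly marked as a prototype: it flattens the last 50 transcript steps into `role: text` lines before calling `api.sendAiAgentV2Turn`, then outputs the response and records it as a transcript step. A standalone sketch of that transcript formatting, with assumed types (the logic mirrors the compiled output above; the type names are illustrative):

```ts
// Minimal sketch of the transcript flattening used by the prototype node.
type TranscriptEntry = { role?: string; payload?: { text?: unknown } };

function formatTranscript(transcript: TranscriptEntry[]): string {
    return transcript
        // keep only entries that actually carry text
        .filter((entry): entry is TranscriptEntry & { payload: { text: string } } =>
            typeof entry?.payload?.text === "string")
        .map(entry => {
            // map to OpenAI-style role strings, defaulting to "system"
            const role =
                entry.role === "user" ? "user" :
                entry.role === "assistant" ? "assistant" : "system";
            return `${role}: ${entry.payload.text}`;
        })
        .join("\n");
}
```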
package/dist/esm/shared/charts/descriptors/service/index.js

@@ -34,7 +34,9 @@ export { HANDOVER_TO_HUMAN_AGENT_TOOL } from "./agentTools/handoverToHumanAgentT
 export { SEND_EMAIL_TOOL } from "./agentTools/sendEmailTool";
 export { EXECUTE_WORKFLOW_TOOL } from "./agentTools/executeWorkflowTool";
 export { LOAD_AI_AGENT } from "./aiAgent/loadAiAgent";
+export { AI_AGENT_MCP_TOOL_CONNECTION_OAUTH2 } from "./aiAgent/aiAgentJobMCPTool";
 export { LIVE_AGENT_CONNECTION, RINGCENTRAL_ENGAGE_CONNECTION, CHATWOOT_CONNECTION, EIGHT_BY_EIGHT_CONNECTION, GENESYS_CLOUD_CONNECTION, GENESYS_CLOUD_CONNECTION_OM, STORM_CONNECTION } from "./handoverConnections";
 export { NICECXONEAAH_AUTHENTICATION_CONNECTION } from "./niceCXOneAAHAuthenticationConnection";
 export { AIOPS_CENTER_WEBHOOKS_CONNECTION } from "./aiOpsCenterConnection";
+export { AI_AGENT_V2 } from "./aiAgentV2";
 //# sourceMappingURL=index.js.map
package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js

@@ -694,7 +694,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
 defaultMockCode: `input.llmResponse = {response: "Mock response"};`
 },
 function: ({ cognigy, config, childConfigs, nodeId }) => __awaiter(void 0, void 0, void 0, function* () {
-var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y;
+var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0;
 const { api, input, flowReferenceId } = cognigy;
 const { temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, streamStopTokens, streamStopTokenOverrides, debugLogTokenCount, debugLogRequestAndCompletion, debugLogLLMLatency, debugLogToolDefinitions, llmProviderReferenceId, usePromptMode, chatTranscriptSteps, responseFormat, streamStoreCopyInInput, seed, immediateOutput, customModelOptions, customRequestOptions, errorHandling = "continue", // default behavior for LLM Prompt node was, continue its execution even though an error occurred (deviating it from the SEO node) & do not output an error message on UI explicitly. However, error is always stored in the input or context object. We can use an extra "say" node to output it.
 errorHandlingGotoTarget, errorMessage, useTextAlternativeForLLM, advancedLogging, loggingWebhookUrl, loggingCustomData, loggingHeaders, conditionForLogging, logErrorToSystem, processImages, transcriptImageHandling, toolChoice, useStrict } = config;

@@ -722,17 +722,17 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
 }
 // handle errors from external services, depending on the settings
 const handleServiceError = (error) => __awaiter(void 0, void 0, void 0, function* () {
-var
+var _1, _2, _3, _4, _5, _6;
 const compactError = {
 name: error === null || error === void 0 ? void 0 : error.name,
 code: error === null || error === void 0 ? void 0 : error.code,
 message: (error === null || error === void 0 ? void 0 : error.message) || error
 };
 // return the requestId if it exist in the error obj.
-if ((
-compactError["requestId"] = (
+if ((_1 = error === null || error === void 0 ? void 0 : error.meta) === null || _1 === void 0 ? void 0 : _1.requestId) {
+compactError["requestId"] = (_2 = error === null || error === void 0 ? void 0 : error.meta) === null || _2 === void 0 ? void 0 : _2.requestId;
 }
-if ((
+if ((_3 = error === null || error === void 0 ? void 0 : error.originalErrorDetails) === null || _3 === void 0 ? void 0 : _3.code) {
 compactError.code = error.originalErrorDetails.code;
 }
 const errorResponse = {

@@ -741,7 +741,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
 // add error to context or input
 switch (storeLocation) {
 case "context":
-(
+(_4 = api.addToContext) === null || _4 === void 0 ? void 0 : _4.call(api, contextKey, errorResponse, "simple");
 break;
 default:
 api.addToInput(inputKey, errorResponse);

@@ -749,7 +749,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
 if (errorHandling === "continue") {
 // output the timeout message
 if (errorMessage) {
-yield ((
+yield ((_5 = api.output) === null || _5 === void 0 ? void 0 : _5.call(api, errorMessage, null));
 }
 // Continue with default node as next node
 const defaultChild = childConfigs === null || childConfigs === void 0 ? void 0 : childConfigs.find(child => child.type === "llmPromptDefault");

@@ -776,7 +776,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
 absorbContext: false
 }
 };
-yield ((
+yield ((_6 = GO_TO.function) === null || _6 === void 0 ? void 0 : _6.call(GO_TO, gotoParams));
 }
 else {
 throw new InternalServerError(error === null || error === void 0 ? void 0 : error.message, { traceId });

@@ -919,17 +919,19 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
 mcpHeaders,
 timeout: (_k = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _k === void 0 ? void 0 : _k.timeout,
 mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
+authType: (_l = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _l === void 0 ? void 0 : _l.authType,
+oAuth2Connection: (_m = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _m === void 0 ? void 0 : _m.oAuth2Connection,
 })), { toolCall: mainToolCall }),
 });
 // if there are any parameters/arguments, add them to the input slots
 if (mainToolCall.function.arguments) {
-input.llmPrompt = Object.assign(Object.assign({}, input.llmPrompt), { toolArgs: Object.assign(Object.assign({}, (
+input.llmPrompt = Object.assign(Object.assign({}, input.llmPrompt), { toolArgs: Object.assign(Object.assign({}, (_p = (_o = input.llmPrompt) === null || _o === void 0 ? void 0 : _o.toolArgs) !== null && _p !== void 0 ? _p : {}), mainToolCall.function.arguments) });
 }
 // Debug Message for Tool Calls, configured in the Tool Node
-if ((
+if ((_q = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _q === void 0 ? void 0 : _q.debugMessage) {
 const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${yield api.parseCognigyScriptText(toolChild.config.toolId)}`];
 // Arguments / Parameters Slots
-const slots = ((
+const slots = ((_r = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _r === void 0 ? void 0 : _r.arguments) && Object.keys(mainToolCall.function.arguments);
 const hasSlots = slots && slots.length > 0;
 messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
 if (hasSlots) {

@@ -944,7 +946,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
 messageLines.push(`- ${slot}: ${slotValueAsString}`);
 });
 }
-(
+(_s = api.logDebugMessage) === null || _s === void 0 ? void 0 : _s.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
 }
 if (toolChild) {
 api.setNextNode(toolChild.id);

@@ -969,11 +971,11 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
 // we stringify objects (e.g. results coming from JSON Mode)
 // so that the transcript only contains text
 const resultToOutput = typeof ((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult) === "object" ? JSON.stringify((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult, undefined, 2) : (llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult;
-yield ((
+yield ((_t = api.output) === null || _t === void 0 ? void 0 : _t.call(api, resultToOutput, {}));
 }
 else if (llmResult.finishReason && llmPromptOptions.stream) {
 // send the finishReason as last output for a stream
-(
+(_u = api.output) === null || _u === void 0 ? void 0 : _u.call(api, "", {
 _cognigy: {
 _preventTranscript: true,
 _messageId,

@@ -996,7 +998,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
 }
 // Add response to Cognigy Input/Context for further usage
 if (storeLocation === "context") {
-(
+(_v = api.addToContext) === null || _v === void 0 ? void 0 : _v.call(api, contextKey, llmResult, "simple");
 }
 else if (storeLocation === "input") {
 api.addToInput(inputKey, llmResult);

@@ -1009,19 +1011,19 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
 const errorDetailsBase = {
 name: error === null || error === void 0 ? void 0 : error.name,
 code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
-message: (error === null || error === void 0 ? void 0 : error.message) || ((
+message: (error === null || error === void 0 ? void 0 : error.message) || ((_w = error.originalErrorDetails) === null || _w === void 0 ? void 0 : _w.message),
 };
 const errorDetails = Object.assign(Object.assign({}, errorDetailsBase), { originalErrorDetails: error === null || error === void 0 ? void 0 : error.originalErrorDetails });
 // return the requestId if it exist in the error obj.
-if ((
+if ((_x = error.meta) === null || _x === void 0 ? void 0 : _x.requestId) {
 errorDetails["meta"] = {
-requestId: (
+requestId: (_y = error.meta) === null || _y === void 0 ? void 0 : _y.requestId
 };
 }
 if (logErrorToSystem) {
-(
+(_z = api.log) === null || _z === void 0 ? void 0 : _z.call(api, "error", JSON.stringify(errorDetailsBase));
 }
-(
+(_0 = api.logDebugError) === null || _0 === void 0 ? void 0 : _0.call(api, errorDetailsBase, "UI__DEBUG_MODE__LLM_PROMPT__ERROR");
 yield handleServiceError(errorDetails);
 return;
 }
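Most of the `LLMPromptV2` edits are the same helper-variable renumbering plus the `authType`/`oAuth2Connection` passthrough already seen in the AI Agent Job node. The error bookkeeping that the changed lines compile from reads roughly as follows in source form (assumed reconstruction, with an error shape inferred from the diff):

```ts
// Illustrative sketch of the compact error built by handleServiceError.
interface LLMPromptError {
    name?: string;
    code?: string | number;
    httpStatusCode?: number;
    message?: string;
    meta?: { requestId?: string };
    originalErrorDetails?: { code?: string | number; message?: string };
}

function toCompactError(error: LLMPromptError) {
    const compactError: Record<string, unknown> = {
        name: error?.name,
        code: error?.code,
        message: error?.message || error,
    };
    // surface the provider requestId when the error object carries one
    if (error?.meta?.requestId) {
        compactError["requestId"] = error?.meta?.requestId;
    }
    // prefer the upstream provider's error code when available
    if (error?.originalErrorDetails?.code) {
        compactError.code = error.originalErrorDetails.code;
    }
    return compactError;
}
```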
package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js

@@ -1,5 +1,15 @@
 /* Custom modules */
 import { createNodeDescriptor } from "../../../createNodeDescriptor";
+export const LLM_PROMPT_MCP_TOOL_CONNECTION_OAUTH2 = {
+    type: "mcp_oauth2",
+    label: "UI__NODE_EDITOR__MCP_OAUTH2_CONNECTION__LABEL",
+    fields: [
+        { fieldName: "oAuth2Url", label: "UI__CONNECTION_EDITOR__FIELD_OAUTH2_URL" },
+        { fieldName: "oAuth2ClientId", label: "UI__CONNECTION_EDITOR__FIELD_CLIENT_ID" },
+        { fieldName: "oAuth2ClientSecret", label: "UI__CONNECTION_EDITOR__FIELD_CLIENT_SECRET" },
+        { fieldName: "oAuth2Scope", label: "UI__CONNECTION_EDITOR__FIELD_SCOPE" }
+    ]
+};
 export const LLM_PROMPT_MCP_TOOL = createNodeDescriptor({
     type: "llmPromptMCPTool",
     defaultLabel: "MCP Tool",

@@ -167,8 +177,53 @@ export const LLM_PROMPT_MCP_TOOL = createNodeDescriptor({
     description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__HEADERS__DESCRIPTION",
     defaultValue: "{}",
     },
+    {
+        key: "oAuth2Connection",
+        type: "connection",
+        label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__OAUTH2_CONNECTION__LABEL",
+        params: {
+            connectionType: LLM_PROMPT_MCP_TOOL_CONNECTION_OAUTH2.type
+        },
+        condition: {
+            key: "authType",
+            value: "oAuth2"
+        }
+    },
+    {
+        key: "authType",
+        type: "select",
+        label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__AUTH_TYPE__LABEL",
+        description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__AUTH_TYPE__DESCRIPTION",
+        defaultValue: "none",
+        params: {
+            required: true,
+            options: [
+                {
+                    label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__AUTH_TYPE__OPTIONS__NONE__LABEL",
+                    value: "none"
+                },
+                {
+                    label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__FIELDS__AUTH_TYPE__OPTIONS__OAUTH2__LABEL",
+                    value: "oAuth2"
+                }
+            ]
+        },
+        resetOption: {
+            lookupValue: "none",
+            fieldsToReset: ["oAuth2Connection"]
+        }
+    },
     ],
     sections: [
+    {
+        key: "auth",
+        label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_MCP_TOOL__SECTIONS__AUTHENTICATION__LABEL",
+        defaultCollapsed: true,
+        fields: [
+            "authType",
+            "oAuth2Connection"
+        ]
+    },
     {
     key: "debugging",
     label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__SECTIONS__DEBUG_SETTINGS__LABEL",

@@ -188,6 +243,7 @@ export const LLM_PROMPT_MCP_TOOL = createNodeDescriptor({
     { type: "field", key: "mcpWarning" },
     { type: "field", key: "mcpServerUrl" },
     { type: "field", key: "timeout" },
+    { type: "section", key: "auth" },
     { type: "section", key: "debugging" },
     { type: "section", key: "advanced" },
     ],
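The `llmPromptMCPTool` changes mirror the `aiAgentJobMCPTool` ones, exporting `LLM_PROMPT_MCP_TOOL_CONNECTION_OAUTH2` with the same URL, client ID, client secret, and scope fields. Those fields suggest an OAuth2 client-credentials exchange; a minimal, purely illustrative sketch of such a token request follows (this helper is not part of the package, and the actual grant flow used by the runtime is not shown in this diff):

```ts
// Hypothetical client-credentials token fetch against a standard OAuth2 token
// endpoint, using the four fields stored in an "mcp_oauth2" connection.
async function fetchOAuth2Token(conn: {
    oAuth2Url: string;
    oAuth2ClientId: string;
    oAuth2ClientSecret: string;
    oAuth2Scope?: string;
}): Promise<string> {
    const body = new URLSearchParams({
        grant_type: "client_credentials",
        client_id: conn.oAuth2ClientId,
        client_secret: conn.oAuth2ClientSecret,
        ...(conn.oAuth2Scope ? { scope: conn.oAuth2Scope } : {}),
    });
    const res = await fetch(conn.oAuth2Url, {
        method: "POST",
        headers: { "Content-Type": "application/x-www-form-urlencoded" },
        body,
    });
    if (!res.ok) {
        throw new Error(`Token request failed: ${res.status}`);
    }
    const { access_token } = (await res.json()) as { access_token: string };
    return access_token;
}
```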