@cognigy/rest-api-client 2025.13.0 → 2025.15.0
This diff compares the publicly released contents of the two package versions as they appear in their public registry. It is provided for informational purposes only.
- package/CHANGELOG.md +10 -0
- package/build/apigroups/ResourcesAPIGroup_2_0.js +4 -1
- package/build/apigroups/SimulationAPIGroup_2_0.js +5 -1
- package/build/shared/charts/descriptors/connectionNodes/internalStorageProviders/amazonStorageProviderConnection.js +7 -1
- package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +31 -4
- package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +11 -3
- package/build/shared/charts/descriptors/service/GPTPrompt.js +31 -0
- package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +58 -21
- package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +58 -21
- package/build/shared/helper/BaseContext.js +3 -1
- package/build/shared/interfaces/messageAPI/handover.js +6 -0
- package/build/shared/interfaces/resources/IAiAgent.js +1 -1
- package/build/shared/interfaces/resources/IExtension.js +12 -13
- package/build/shared/interfaces/resources/IKnowledgeDescriptor.js +45 -0
- package/build/shared/interfaces/resources/ISimulation.js +9 -0
- package/build/shared/interfaces/resources/TResourceType.js +3 -0
- package/build/shared/interfaces/restAPI/resources/chart/v2.0/IReadFlowChartAiAgentsRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/simulation/persona/IGenerateBulkPersonaRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/simulation/persona/IGeneratePersonaRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/simulation/persona/IGetPersonaOptionsRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/simulation/persona/IRegeneratePersonaFieldRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/simulation/simulationRun/ISimulationRunRest_2_0.js +6 -1
- package/build/shared/interfaces/security/IPermission.js +2 -0
- package/build/shared/interfaces/security/IRole.js +3 -0
- package/build/shared/interfaces/security/index.js +1 -1
- package/dist/esm/apigroups/ResourcesAPIGroup_2_0.js +4 -1
- package/dist/esm/apigroups/SimulationAPIGroup_2_0.js +5 -1
- package/dist/esm/shared/charts/descriptors/connectionNodes/internalStorageProviders/amazonStorageProviderConnection.js +7 -1
- package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +31 -4
- package/dist/esm/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +11 -3
- package/dist/esm/shared/charts/descriptors/service/GPTPrompt.js +31 -0
- package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +58 -21
- package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +65 -28
- package/dist/esm/shared/helper/BaseContext.js +3 -1
- package/dist/esm/shared/interfaces/messageAPI/handover.js +6 -0
- package/dist/esm/shared/interfaces/resources/IAiAgent.js +1 -1
- package/dist/esm/shared/interfaces/resources/IExtension.js +12 -13
- package/dist/esm/shared/interfaces/resources/IKnowledgeDescriptor.js +42 -0
- package/dist/esm/shared/interfaces/resources/ISimulation.js +6 -0
- package/dist/esm/shared/interfaces/resources/TResourceType.js +3 -0
- package/dist/esm/shared/interfaces/restAPI/resources/chart/v2.0/IReadFlowChartAiAgentsRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/simulation/persona/IGenerateBulkPersonaRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/simulation/persona/IGeneratePersonaRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/simulation/persona/IGetPersonaOptionsRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/simulation/persona/IRegeneratePersonaFieldRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/simulation/simulationRun/ISimulationRunRest_2_0.js +5 -0
- package/dist/esm/shared/interfaces/security/IPermission.js +2 -0
- package/dist/esm/shared/interfaces/security/IRole.js +3 -0
- package/dist/esm/shared/interfaces/security/index.js +1 -1
- package/package.json +1 -1
- package/types/index.d.ts +776 -433
package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js
CHANGED
@@ -318,9 +318,16 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
             },
             {
                 key: "debugLogRequestAndCompletion",
-                label: "
+                label: "UI__NODE_EDITOR__SERVICE__LLM_PROMPT__FIELDS__LOG_SYSTEM_PROMPT_AND_COMPLETION__LABEL",
                 type: "toggle",
-                description: "
+                description: "UI__NODE_EDITOR__SERVICE__LLM_PROMPT__FIELDS__LOG_SYSTEM_PROMPT_AND_COMPLETION__DESCRIPTION",
+                defaultValue: false
+            },
+            {
+                key: "debugLogLLMLatency",
+                label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_LLM_LATENCY__LABEL",
+                type: "toggle",
+                description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_LLM_LATENCY__DESCRIPTION",
                 defaultValue: false
             },
             {
@@ -579,6 +586,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
                 "debugDescription",
                 "debugLogTokenCount",
                 "debugLogRequestAndCompletion",
+                "debugLogLLMLatency",
                 "debugLogToolDefinitions"
             ]
         },
@@ -617,9 +625,9 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
     },
     tags: ["ai", "llm", "gpt", "generative ai", "openai", "azure", "prompt"],
     function: ({ cognigy, config, childConfigs, nodeId }) => __awaiter(void 0, void 0, void 0, function* () {
-        var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u;
+        var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v;
        const { api, input, flowReferenceId } = cognigy;
-        const { temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, streamStopTokens, streamStopTokenOverrides, debugLogTokenCount, debugLogRequestAndCompletion, debugLogToolDefinitions, llmProviderReferenceId, usePromptMode, chatTranscriptSteps, responseFormat, streamStoreCopyInInput, seed, immediateOutput, customModelOptions, customRequestOptions, errorHandling = "continue", // default behavior for LLM Prompt node was, continue its execution even though an error occurred (deviating it from the SEO node) & do not output an error message on UI explicitly. However, error is always stored in the input or context object. We can use an extra "say" node to output it.
+        const { temperature, maxTokens, topP, presencePenalty, frequencyPenalty, useStop, stop, storeLocation, contextKey, inputKey, timeout, streamStopTokens, streamStopTokenOverrides, debugLogTokenCount, debugLogRequestAndCompletion, debugLogLLMLatency, debugLogToolDefinitions, llmProviderReferenceId, usePromptMode, chatTranscriptSteps, responseFormat, streamStoreCopyInInput, seed, immediateOutput, customModelOptions, customRequestOptions, errorHandling = "continue", // default behavior for LLM Prompt node was, continue its execution even though an error occurred (deviating it from the SEO node) & do not output an error message on UI explicitly. However, error is always stored in the input or context object. We can use an extra "say" node to output it.
         errorHandlingGotoTarget, errorMessage, logErrorToSystem, processImages, transcriptImageHandling, toolChoice, useStrict } = config;
         let prompt = config.prompt || "";
         const { traceId } = input;
@@ -645,17 +653,17 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
         }
         // handle errors from external services, depending on the settings
         const handleServiceError = (error) => __awaiter(void 0, void 0, void 0, function* () {
-            var 
+            var _w, _x, _y, _z, _0, _1;
             const compactError = {
                 name: error === null || error === void 0 ? void 0 : error.name,
                 code: error === null || error === void 0 ? void 0 : error.code,
                 message: (error === null || error === void 0 ? void 0 : error.message) || error
             };
             // return the requestId if it exist in the error obj.
-            if ((
-                compactError["requestId"] = (
+            if ((_w = error === null || error === void 0 ? void 0 : error.meta) === null || _w === void 0 ? void 0 : _w.requestId) {
+                compactError["requestId"] = (_x = error === null || error === void 0 ? void 0 : error.meta) === null || _x === void 0 ? void 0 : _x.requestId;
             }
-            if ((
+            if ((_y = error === null || error === void 0 ? void 0 : error.originalErrorDetails) === null || _y === void 0 ? void 0 : _y.code) {
                 compactError.code = error.originalErrorDetails.code;
             }
             const errorResponse = {
@@ -664,7 +672,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
             // add error to context or input
             switch (storeLocation) {
                 case "context":
-                    (
+                    (_z = api.addToContext) === null || _z === void 0 ? void 0 : _z.call(api, contextKey, errorResponse, "simple");
                     break;
                 default:
                     api.addToInput(inputKey, errorResponse);
@@ -672,7 +680,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
             if (errorHandling === "continue") {
                 // output the timeout message
                 if (errorMessage) {
-                    yield ((
+                    yield ((_0 = api.output) === null || _0 === void 0 ? void 0 : _0.call(api, errorMessage, null));
                 }
                 // Continue with default node as next node
                 const defaultChild = childConfigs === null || childConfigs === void 0 ? void 0 : childConfigs.find(child => child.type === "llmPromptDefault");
@@ -699,7 +707,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
                         absorbContext: false
                     }
                 };
-                yield ((
+                yield ((_1 = GO_TO.function) === null || _1 === void 0 ? void 0 : _1.call(GO_TO, gotoParams));
             }
             else {
                 throw new InternalServerError(error === null || error === void 0 ? void 0 : error.message, { traceId });
@@ -708,6 +716,8 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
         try {
             const isStreamingChannel = input.channel === "webchat3" || input.channel === "adminconsole";
             const _messageId = randomUUID();
+            // Start measuring LLM latency and time to first output if debug flag is enabled
+            let firstOutputTime = null;
             /**
              * Retrieve the tool definitions from the child nodes
              */
@@ -724,6 +734,10 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
                 var _a;
                 text = isStreamingChannel ? text : text.trim();
                 if (text) {
+                    // Record first output time for debugging if not already recorded
+                    if (debugLogLLMLatency && firstOutputTime === null) {
+                        firstOutputTime = Date.now();
+                    }
                     // if we got text, we output it, but prevent it from being added to the transcript
                     (_a = api.output) === null || _a === void 0 ? void 0 : _a.call(api, text, {
                         _cognigy: {
@@ -767,15 +781,38 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
                     content: prompt
                 }];
         }
+        const llmStartTime = debugLogLLMLatency ? Date.now() : 0;
         // Run the LLM Query
        const fullLlmResult = yield api.runGenerativeAIPrompt(llmPromptOptions, "gptPromptNode");
+        // End measuring times and log if debug flag is enabled
+        if (debugLogLLMLatency) {
+            const llmEndTime = Date.now();
+            const debugMessages = [];
+            const llmLatencyMs = llmEndTime - llmStartTime;
+            let timeToFirstOutputLabel;
+            if (fullLlmResult.finishReason === "tool_calls" && fullLlmResult.toolCalls.length > 0) {
+                timeToFirstOutputLabel = " - (tool call)";
+            }
+            else if (firstOutputTime === null) {
+                timeToFirstOutputLabel = " - (no output)";
+            }
+            else {
+                firstOutputTime = firstOutputTime || llmEndTime;
+                timeToFirstOutputLabel = `${firstOutputTime - llmStartTime}ms`;
+            }
+            if (storeLocation === "stream") {
+                debugMessages.push(`UI__DEBUG_MODE__AI_AGENT_JOB__TIME_TO_FIRST_OUTPUT__LABEL: ${timeToFirstOutputLabel}`);
+            }
+            debugMessages.push(`UI__DEBUG_MODE__AI_AGENT_JOB__LLM_LATENCY__LABEL: ${llmLatencyMs}ms`);
+            (_c = api.logDebugMessage) === null || _c === void 0 ? void 0 : _c.call(api, debugMessages.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TIMING__HEADER");
+        }
         const { messages } = fullLlmResult, llmResult = __rest(fullLlmResult, ["messages"]);
         const isFollowSessionActive = api.getMetadata().isFollowSessionActive;
         if (debugLogToolDefinitions) {
-            (
+            (_d = api.logDebugMessage) === null || _d === void 0 ? void 0 : _d.call(api, tools, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_DEFINITIONS");
         }
         // if we're in adminconsole or following a session, process debugging options
-        (input.endpointType === "adminconsole" || isFollowSessionActive) && writeLLMDebugLogs("LLM Prompt", debugPrompt, llmResult, debugLogTokenCount, debugLogRequestAndCompletion, cognigy);
+        (input.endpointType === "adminconsole" || isFollowSessionActive) && writeLLMDebugLogs("LLM Prompt", debugPrompt, llmResult, debugLogTokenCount, debugLogRequestAndCompletion, cognigy, "llmPromptV2");
         if (llmResult.finishReason === "tool_calls" && llmResult.toolCalls.length > 0) {
             const mainToolCall = llmResult.toolCalls[0];
             let isMcpToolCall = false;
@@ -787,7 +824,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
                 isMcpToolCall = true;
             }
             if (mainToolCall.function.name !== "retrieve_knowledge" && toolChild === undefined) {
-                (
+                (_e = api.logDebugError) === null || _e === void 0 ? void 0 : _e.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
             }
             // Add last tool call to session state for loading it from Tool Answer Node
             api.updateSessionStateValues({
@@ -795,20 +832,20 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
                     flow: flowReferenceId,
                     node: nodeId,
                 } }, (isMcpToolCall && {
-                mcpServerUrl: (
-                timeout: (
+                mcpServerUrl: (_f = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _f === void 0 ? void 0 : _f.mcpServerUrl,
+                timeout: (_g = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _g === void 0 ? void 0 : _g.timeout,
                 mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
             })), { toolCall: mainToolCall }),
             });
             // if there are any parameters/arguments, add them to the input slots
             if (mainToolCall.function.arguments) {
-                input.llmPrompt = Object.assign(Object.assign({}, input.llmPrompt), { toolArgs: Object.assign(Object.assign({}, (
+                input.llmPrompt = Object.assign(Object.assign({}, input.llmPrompt), { toolArgs: Object.assign(Object.assign({}, (_j = (_h = input.llmPrompt) === null || _h === void 0 ? void 0 : _h.toolArgs) !== null && _j !== void 0 ? _j : {}), mainToolCall.function.arguments) });
             }
             // Debug Message for Tool Calls, configured in the Tool Node
-            if ((
+            if ((_k = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _k === void 0 ? void 0 : _k.debugMessage) {
                 const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${api.parseCognigyScriptText(toolChild.config.toolId)}`];
                 // Arguments / Parameters Slots
-                const slots = ((
+                const slots = ((_l = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _l === void 0 ? void 0 : _l.arguments) && Object.keys(mainToolCall.function.arguments);
                 const hasSlots = slots && slots.length > 0;
                 messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
                 if (hasSlots) {
@@ -823,7 +860,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
                         messageLines.push(`- ${slot}: ${slotValueAsString}`);
                     });
                 }
-                (
+                (_m = api.logDebugMessage) === null || _m === void 0 ? void 0 : _m.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
             }
             if (toolChild) {
                 api.setNextNode(toolChild.id);
@@ -848,11 +885,11 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
             // we stringify objects (e.g. results coming from JSON Mode)
             // so that the transcript only contains text
             const resultToOutput = typeof ((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult) === "object" ? JSON.stringify((llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult, undefined, 2) : (llmResult === null || llmResult === void 0 ? void 0 : llmResult.result) || llmResult;
-            yield ((
+            yield ((_o = api.output) === null || _o === void 0 ? void 0 : _o.call(api, resultToOutput, {}));
         }
         else if (llmResult.finishReason && llmPromptOptions.stream) {
             // send the finishReason as last output for a stream
-            (
+            (_p = api.output) === null || _p === void 0 ? void 0 : _p.call(api, "", {
                 _cognigy: {
                     _preventTranscript: true,
                     _messageId,
@@ -875,7 +912,7 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
         }
         // Add response to Cognigy Input/Context for further usage
         if (storeLocation === "context") {
-            (
+            (_q = api.addToContext) === null || _q === void 0 ? void 0 : _q.call(api, contextKey, llmResult, "simple");
         }
         else if (storeLocation === "input") {
             api.addToInput(inputKey, llmResult);
@@ -888,19 +925,19 @@ export const LLM_PROMPT_V2 = createNodeDescriptor({
             const errorDetailsBase = {
                 name: error === null || error === void 0 ? void 0 : error.name,
                 code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
-                message: (error === null || error === void 0 ? void 0 : error.message) || ((
+                message: (error === null || error === void 0 ? void 0 : error.message) || ((_r = error.originalErrorDetails) === null || _r === void 0 ? void 0 : _r.message),
             };
             const errorDetails = Object.assign(Object.assign({}, errorDetailsBase), { originalErrorDetails: error === null || error === void 0 ? void 0 : error.originalErrorDetails });
             // return the requestId if it exist in the error obj.
-            if ((
+            if ((_s = error.meta) === null || _s === void 0 ? void 0 : _s.requestId) {
                 errorDetails["meta"] = {
-                    requestId: (
+                    requestId: (_t = error.meta) === null || _t === void 0 ? void 0 : _t.requestId
                 };
             }
             if (logErrorToSystem) {
-                (
+                (_u = api.log) === null || _u === void 0 ? void 0 : _u.call(api, "error", JSON.stringify(errorDetailsBase));
             }
-            (
+            (_v = api.logDebugError) === null || _v === void 0 ? void 0 : _v.call(api, errorDetailsBase, "UI__DEBUG_MODE__LLM_PROMPT__ERROR");
             yield handleServiceError(errorDetails);
             return;
         }
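Net effect of the hunks above: the LLM Prompt v2 node gains an opt-in debugLogLLMLatency toggle that timestamps the call to api.runGenerativeAIPrompt and the first non-empty streamed chunk, then writes total latency and time-to-first-output to the debug log. A minimal sketch of that measurement pattern follows; the run callback and log strings are stand-ins, not part of @cognigy/rest-api-client:

    // Sketch only: `run` models api.runGenerativeAIPrompt with a streaming callback.
    async function timedPrompt(run: (onChunk: (text: string) => void) => Promise<string>): Promise<string> {
        const start = Date.now();
        let firstOutput: number | null = null;
        const result = await run((chunk) => {
            // mirrors firstOutputTime in the diff: record only the first non-empty chunk
            if (chunk && firstOutput === null) {
                firstOutput = Date.now();
            }
        });
        console.log(`LLM latency: ${Date.now() - start}ms`);
        if (firstOutput !== null) {
            console.log(`Time to first output: ${firstOutput - start}ms`);
        }
        return result;
    }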
package/dist/esm/shared/helper/BaseContext.js
CHANGED
@@ -150,7 +150,9 @@ export class BaseContext {
             return (c) ? c : undefined;
         }
         else { // there is none
-            return
+            return this.context[key] !== null && this.context[key] !== undefined
+                ? this.context[key]
+                : undefined;
         }
     }
     /**
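This fix means a stored falsy value is no longer swallowed by the lookup: only null and undefined map to undefined (the removed return expression is truncated in this view). An illustration of the new check, with a plain object standing in for the class's context field:

    // Plain-object illustration of the new null/undefined check (names assumed).
    const context: Record<string, unknown> = { retries: 0, muted: false };
    const get = (key: string): unknown =>
        context[key] !== null && context[key] !== undefined ? context[key] : undefined;
    get("retries"); // 0 — falsy-but-defined values now come back intact
    get("muted");   // false
    get("missing"); // undefined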
package/dist/esm/shared/interfaces/messageAPI/handover.js
CHANGED
@@ -33,6 +33,9 @@ export const createHandoverRequestDataSchema = {
             "type": "string",
             "format": "mongo-id",
         },
+        "locale": {
+            "type": "string",
+        },
         "userId": {
             "type": "string"
         },
@@ -195,6 +198,9 @@ export const sendMessageToProviderSchema = {
             "type": "string",
             "format": "mongo-id",
         },
+        "locale": {
+            "type": "string",
+        },
         "userId": {
             "type": "string"
         },
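Both handover schemas now accept an optional locale string alongside the existing identifiers. An illustrative request body (all values invented; only the locale key is new in this release):

    const handoverRequestData = {
        userId: "user-1234", // existing field
        locale: "de-DE",     // newly allowed by createHandoverRequestDataSchema
    };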
package/dist/esm/shared/interfaces/resources/IAiAgent.js
CHANGED
@@ -10,7 +10,7 @@ export const aiAgentDataSchema = {
         name: { type: "string", format: "resource-name" },
         image: { type: "string" },
         imageOptimizedFormat: { type: "boolean" },
-        knowledgeReferenceId: { type: "string", format: "uuid" },
+        knowledgeReferenceId: { type: ["string", "null"], format: "uuid" },
         description: { type: "string", maxLength: 1000 },
         speakingStyle: {
             type: "object",
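knowledgeReferenceId is widened from "string" to ["string", "null"], so an AI Agent document can now carry an explicitly cleared knowledge reference. Both of these shapes should validate against aiAgentDataSchema (sample values invented):

    const linked = { knowledgeReferenceId: "123e4567-e89b-12d3-a456-426614174000" }; // uuid, as before
    const detached = { knowledgeReferenceId: null }; // rejected before this release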
package/dist/esm/shared/interfaces/resources/IExtension.js
CHANGED
@@ -1,21 +1,25 @@
+var _a;
 import { entityMetaSchema } from "./IEntityMeta";
 import { nodeDescriptorSchema } from "./INodeDescriptorSet";
+import { knowledgeDescriptorSchema } from "./IKnowledgeDescriptor";
 export const extensionNodePackageDataSchema = {
     title: "extensionNodePackageDataSchema",
     type: "object",
     additionalProperties: false,
     properties: {
         nodes: { type: "array", items: nodeDescriptorSchema },
+        knowledge: {
+            type: "array",
+            items: knowledgeDescriptorSchema,
+            maxItems: parseInt((_a = process === null || process === void 0 ? void 0 : process.env) === null || _a === void 0 ? void 0 : _a.EXTENSIONS_KNOWLEDGE_EXTRACTORS_MAX, 10) || 20
+        },
         connections: {
             type: "array",
             additionalItems: false,
             items: {
                 type: "object",
                 additionalProperties: false,
-                required: [
-                    "type",
-                    "fields"
-                ],
+                required: ["type", "fields"],
                 properties: {
                     label: { type: "string", format: "resource-name" },
                     type: { type: "string", format: "resource-name" },
@@ -25,9 +29,7 @@ export const extensionNodePackageDataSchema = {
             items: {
                 type: "object",
                 additionalProperties: false,
-                required: [
-                    "fieldName"
-                ],
+                required: ["fieldName"],
                 properties: {
                     _id: { type: "string", format: "mongo-id" },
                     fieldName: {
@@ -50,10 +52,7 @@ export const extensionNodePackageDataSchema = {
             items: {
                 type: "object",
                 additionalProperties: false,
-                required: [
-                    "type",
-                    "path"
-                ],
+                required: ["type", "path"],
                 properties: {
                     label: { type: "string", format: "resource-name" },
                     type: { type: "string", format: "resource-name" },
@@ -61,7 +60,7 @@ export const extensionNodePackageDataSchema = {
                 }
             }
         }
-    }
+    }
 };
 /** 512 KB (1 * 1024 * 1024), Javascript uses 16bit for a single character */
 export const EXTENSION_README_SIZE = 1048576;
@@ -75,6 +74,6 @@ export const extensionSchema = {
     title: "extensionSchema",
     type: "object",
     additionalProperties: false,
-    properties: Object.assign(Object.assign(Object.assign({}, entityMetaSchema.properties), extensionDataSchema.properties), { subResourceReference: { type: "string", format: "mongo-id" }, projectReference: { type: "string", format: "mongo-id" }, organisationReference: { type: "string", format: "mongo-id" } })
+    properties: Object.assign(Object.assign(Object.assign({}, entityMetaSchema.properties), extensionDataSchema.properties), { subResourceReference: { type: "string", format: "mongo-id" }, knowledge: { type: "array", items: knowledgeDescriptorSchema }, projectReference: { type: "string", format: "mongo-id" }, organisationReference: { type: "string", format: "mongo-id" } })
 };
 //# sourceMappingURL=IExtension.js.map
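The new knowledge array is capped via the EXTENSIONS_KNOWLEDGE_EXTRACTORS_MAX environment variable with a fallback of 20. Because parseInt yields NaN for an unset or non-numeric value and NaN is falsy, the || 20 branch takes over; note that an explicit "0" also falls back:

    const raw = process.env.EXTENSIONS_KNOWLEDGE_EXTRACTORS_MAX; // string | undefined
    const maxItems = parseInt(raw ?? "", 10) || 20;
    // unset or "abc" -> NaN -> 20;  "50" -> 50;  "0" -> 0 (falsy) -> 20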
package/dist/esm/shared/interfaces/resources/IKnowledgeDescriptor.js
ADDED
@@ -0,0 +1,42 @@
+import { nodeDescriptorSchema, nodeFieldSchema, } from "./INodeDescriptorSet";
+export const knowledgeFieldTypes = [
+    "text",
+    "rule",
+    "json",
+    "checkbox",
+    "time",
+    "date",
+    "datetime",
+    "select",
+    "xml",
+    "textArray",
+    "chipInput",
+    "toggle",
+    "slider",
+    "number",
+    "daterange",
+    "connection",
+    "condition",
+    "description",
+];
+export const knowledgeFieldSchema = {
+    title: "knowledgeFieldSchema",
+    type: "object",
+    additionalProperties: false,
+    properties: Object.assign(Object.assign({}, nodeFieldSchema.properties), { type: { type: "string", enum: [...knowledgeFieldTypes] }, key: { type: "string", minLength: 1, maxLength: 200 } }),
+};
+const { type, summary, defaultLabel, sections, form } = nodeDescriptorSchema.properties;
+export const knowledgeDescriptorSchema = {
+    title: "knowledgeDescriptorSchema",
+    type: "object",
+    additionalProperties: false,
+    properties: {
+        type,
+        label: defaultLabel,
+        summary,
+        sections,
+        form,
+        fields: { type: "array", items: knowledgeFieldSchema },
+    },
+};
+//# sourceMappingURL=IKnowledgeDescriptor.js.map
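Given this schema, an extension's knowledge extractor descriptor would look roughly like the sketch below. The example is hypothetical: the type, label, and field keys are invented, and only key/type on fields are pinned down by this file (the remaining field shape is inherited from nodeFieldSchema):

    const exampleKnowledgeDescriptor = {
        type: "confluenceExtractor",
        label: "Confluence Extractor",
        summary: "Imports pages from a Confluence space",
        fields: [
            { key: "spaceKey", type: "text" },          // "text" is in knowledgeFieldTypes
            { key: "includeArchived", type: "toggle" }, // so is "toggle"
        ],
    };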
package/dist/esm/shared/interfaces/resources/ISimulation.js
ADDED
@@ -0,0 +1,6 @@
+export var SuccessCriterionType;
+(function (SuccessCriterionType) {
+    SuccessCriterionType["TEXT"] = "text";
+    SuccessCriterionType["GOAL_COMPLETED"] = "goalCompleted";
+})(SuccessCriterionType || (SuccessCriterionType = {}));
+//# sourceMappingURL=ISimulation.js.map
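SuccessCriterionType separates plain-text matching from goal completion. A consumer could branch on it like this (assuming the enum is re-exported from the package root; otherwise it would be imported from the compiled module path):

    import { SuccessCriterionType } from "@cognigy/rest-api-client";

    function describeCriterion(criterion: SuccessCriterionType): string {
        switch (criterion) {
            case SuccessCriterionType.TEXT:
                return "transcript matches the expected text";
            case SuccessCriterionType.GOAL_COMPLETED:
                return "the conversation goal was completed";
        }
    }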
package/dist/esm/shared/interfaces/resources/TResourceType.js
CHANGED
@@ -118,6 +118,7 @@ export const arrayTSnapshottableResourceType = [
     "playbook",
     "slotFiller",
     "snippet",
+    "simulation",
 ];
 export const arrayTChartableResourceType = ["flow"];
 export const resourceTypes = [...arrayTResourceType];
@@ -174,6 +175,7 @@ export const packageableResourceTypes = [
     "nluconnector",
     "playbook",
     "snippet",
+    "simulation",
 ];
 export const primaryResourceTypes = [
     "aiAgent",
@@ -191,6 +193,7 @@ export const primaryResourceTypes = [
     "playbook",
     "snippet",
     "handoverProvider",
+    "simulation",
 ];
 export const pinnableResourceTypes = [
     "project"
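Adding "simulation" to all three lists means simulations can now be snapshotted, packaged, and treated as a primary resource. Membership checks reflect that (assuming the arrays are imported from the TResourceType module):

    arrayTSnapshottableResourceType.includes("simulation"); // true as of 2025.15.0
    packageableResourceTypes.includes("simulation");        // true
    primaryResourceTypes.includes("simulation");            // true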
package/dist/esm/shared/interfaces/restAPI/simulation/simulationRun/ISimulationRunRest_2_0.js
CHANGED
@@ -7,6 +7,11 @@ export var ESuccessCriteriaTypeRest_2_0;
 (function (ESuccessCriteriaTypeRest_2_0) {
     ESuccessCriteriaTypeRest_2_0["TEXT"] = "text";
 })(ESuccessCriteriaTypeRest_2_0 || (ESuccessCriteriaTypeRest_2_0 = {}));
+export var SuccessCriterionType_2_0;
+(function (SuccessCriterionType_2_0) {
+    SuccessCriterionType_2_0["TEXT"] = "text";
+    SuccessCriterionType_2_0["GOAL_COMPLETED"] = "goalCompleted";
+})(SuccessCriterionType_2_0 || (SuccessCriterionType_2_0 = {}));
 export var ESentimentTypeRest_2_0;
 (function (ESentimentTypeRest_2_0) {
     ESentimentTypeRest_2_0["POSITIVE"] = "POSITIVE";
package/dist/esm/shared/interfaces/security/IPermission.js
CHANGED
@@ -14,6 +14,7 @@
  * - userDetails
  * - users
  * - voiceGatewayAccount
+ * - opsCenter
  */
 export const organisationWidePermissions = [
     "analyticsOdata",
@@ -25,6 +26,7 @@ export const organisationWidePermissions = [
     "userDetails",
     "users",
     "voiceGatewayAccount",
+    "opsCenter",
 ];
 /**
  * @openapi
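With "opsCenter" appended, organisation-wide permission checks recognise the new Ops Center scope:

    // Assuming the array is imported from the security interfaces:
    organisationWidePermissions.includes("opsCenter"); // true as of 2025.15.0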
package/dist/esm/shared/interfaces/security/IRole.js
CHANGED
@@ -20,6 +20,7 @@
  * - userManager
  * - userDetailsViewer
  * - voiceGatewayUser
+ * - opsCenterUser
  */
 export const organisationWideRoles = [
     "admin",
@@ -37,6 +38,8 @@ export const organisationWideRoles = [
     "userManager",
     "userDetailsViewer",
     "voiceGatewayUser",
+    "autoDialerUser",
+    "opsCenterUser",
 ];
 /**
  * @openapi
package/dist/esm/shared/interfaces/security/index.js
CHANGED
@@ -1,4 +1,4 @@
-export const COMPUTED_ACL_HASH_VERSION = "
+export const COMPUTED_ACL_HASH_VERSION = "v23";
 export { availablePermissions, } from "./IPermission";
 export { organisationWideRoles, projectWideRoles, availableRoles, } from "./IRole";
 export { operations, } from "./IOperation";