@cognigy/rest-api-client 2025.13.0 → 2025.15.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/CHANGELOG.md +10 -0
  2. package/build/apigroups/ResourcesAPIGroup_2_0.js +4 -1
  3. package/build/apigroups/SimulationAPIGroup_2_0.js +5 -1
  4. package/build/shared/charts/descriptors/connectionNodes/internalStorageProviders/amazonStorageProviderConnection.js +7 -1
  5. package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +31 -4
  6. package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +11 -3
  7. package/build/shared/charts/descriptors/service/GPTPrompt.js +31 -0
  8. package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +58 -21
  9. package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +58 -21
  10. package/build/shared/helper/BaseContext.js +3 -1
  11. package/build/shared/interfaces/messageAPI/handover.js +6 -0
  12. package/build/shared/interfaces/resources/IAiAgent.js +1 -1
  13. package/build/shared/interfaces/resources/IExtension.js +12 -13
  14. package/build/shared/interfaces/resources/IKnowledgeDescriptor.js +45 -0
  15. package/build/shared/interfaces/resources/ISimulation.js +9 -0
  16. package/build/shared/interfaces/resources/TResourceType.js +3 -0
  17. package/build/shared/interfaces/restAPI/resources/chart/v2.0/IReadFlowChartAiAgentsRest_2_0.js +3 -0
  18. package/build/shared/interfaces/restAPI/simulation/persona/IGenerateBulkPersonaRest_2_0.js +3 -0
  19. package/build/shared/interfaces/restAPI/simulation/persona/IGeneratePersonaRest_2_0.js +3 -0
  20. package/build/shared/interfaces/restAPI/simulation/persona/IGetPersonaOptionsRest_2_0.js +3 -0
  21. package/build/shared/interfaces/restAPI/simulation/persona/IRegeneratePersonaFieldRest_2_0.js +3 -0
  22. package/build/shared/interfaces/restAPI/simulation/simulationRun/ISimulationRunRest_2_0.js +6 -1
  23. package/build/shared/interfaces/security/IPermission.js +2 -0
  24. package/build/shared/interfaces/security/IRole.js +3 -0
  25. package/build/shared/interfaces/security/index.js +1 -1
  26. package/dist/esm/apigroups/ResourcesAPIGroup_2_0.js +4 -1
  27. package/dist/esm/apigroups/SimulationAPIGroup_2_0.js +5 -1
  28. package/dist/esm/shared/charts/descriptors/connectionNodes/internalStorageProviders/amazonStorageProviderConnection.js +7 -1
  29. package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +31 -4
  30. package/dist/esm/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +11 -3
  31. package/dist/esm/shared/charts/descriptors/service/GPTPrompt.js +31 -0
  32. package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +58 -21
  33. package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +65 -28
  34. package/dist/esm/shared/helper/BaseContext.js +3 -1
  35. package/dist/esm/shared/interfaces/messageAPI/handover.js +6 -0
  36. package/dist/esm/shared/interfaces/resources/IAiAgent.js +1 -1
  37. package/dist/esm/shared/interfaces/resources/IExtension.js +12 -13
  38. package/dist/esm/shared/interfaces/resources/IKnowledgeDescriptor.js +42 -0
  39. package/dist/esm/shared/interfaces/resources/ISimulation.js +6 -0
  40. package/dist/esm/shared/interfaces/resources/TResourceType.js +3 -0
  41. package/dist/esm/shared/interfaces/restAPI/resources/chart/v2.0/IReadFlowChartAiAgentsRest_2_0.js +2 -0
  42. package/dist/esm/shared/interfaces/restAPI/simulation/persona/IGenerateBulkPersonaRest_2_0.js +2 -0
  43. package/dist/esm/shared/interfaces/restAPI/simulation/persona/IGeneratePersonaRest_2_0.js +2 -0
  44. package/dist/esm/shared/interfaces/restAPI/simulation/persona/IGetPersonaOptionsRest_2_0.js +2 -0
  45. package/dist/esm/shared/interfaces/restAPI/simulation/persona/IRegeneratePersonaFieldRest_2_0.js +2 -0
  46. package/dist/esm/shared/interfaces/restAPI/simulation/simulationRun/ISimulationRunRest_2_0.js +5 -0
  47. package/dist/esm/shared/interfaces/security/IPermission.js +2 -0
  48. package/dist/esm/shared/interfaces/security/IRole.js +3 -0
  49. package/dist/esm/shared/interfaces/security/index.js +1 -1
  50. package/package.json +1 -1
  51. package/types/index.d.ts +776 -433
@@ -1,24 +1,28 @@
1
1
  "use strict";
2
+ var _a;
2
3
  Object.defineProperty(exports, "__esModule", { value: true });
3
4
  exports.extensionSchema = exports.extensionDataSchema = exports.EXTENSION_README_SIZE = exports.extensionNodePackageDataSchema = void 0;
4
5
  const IEntityMeta_1 = require("./IEntityMeta");
5
6
  const INodeDescriptorSet_1 = require("./INodeDescriptorSet");
7
+ const IKnowledgeDescriptor_1 = require("./IKnowledgeDescriptor");
6
8
  exports.extensionNodePackageDataSchema = {
7
9
  title: "extensionNodePackageDataSchema",
8
10
  type: "object",
9
11
  additionalProperties: false,
10
12
  properties: {
11
13
  nodes: { type: "array", items: INodeDescriptorSet_1.nodeDescriptorSchema },
14
+ knowledge: {
15
+ type: "array",
16
+ items: IKnowledgeDescriptor_1.knowledgeDescriptorSchema,
17
+ maxItems: parseInt((_a = process === null || process === void 0 ? void 0 : process.env) === null || _a === void 0 ? void 0 : _a.EXTENSIONS_KNOWLEDGE_EXTRACTORS_MAX, 10) || 20
18
+ },
12
19
  connections: {
13
20
  type: "array",
14
21
  additionalItems: false,
15
22
  items: {
16
23
  type: "object",
17
24
  additionalProperties: false,
18
- required: [
19
- "type",
20
- "fields"
21
- ],
25
+ required: ["type", "fields"],
22
26
  properties: {
23
27
  label: { type: "string", format: "resource-name" },
24
28
  type: { type: "string", format: "resource-name" },
@@ -28,9 +32,7 @@ exports.extensionNodePackageDataSchema = {
28
32
  items: {
29
33
  type: "object",
30
34
  additionalProperties: false,
31
- required: [
32
- "fieldName"
33
- ],
35
+ required: ["fieldName"],
34
36
  properties: {
35
37
  _id: { type: "string", format: "mongo-id" },
36
38
  fieldName: {
@@ -53,10 +55,7 @@ exports.extensionNodePackageDataSchema = {
53
55
  items: {
54
56
  type: "object",
55
57
  additionalProperties: false,
56
- required: [
57
- "type",
58
- "path"
59
- ],
58
+ required: ["type", "path"],
60
59
  properties: {
61
60
  label: { type: "string", format: "resource-name" },
62
61
  type: { type: "string", format: "resource-name" },
@@ -64,7 +63,7 @@ exports.extensionNodePackageDataSchema = {
64
63
  }
65
64
  }
66
65
  }
67
- },
66
+ }
68
67
  };
69
68
  /** 512 KB (1 * 1024 * 1024), Javascript uses 16bit for a single character */
70
69
  exports.EXTENSION_README_SIZE = 1048576;
@@ -78,6 +77,6 @@ exports.extensionSchema = {
78
77
  title: "extensionSchema",
79
78
  type: "object",
80
79
  additionalProperties: false,
81
- properties: Object.assign(Object.assign(Object.assign({}, IEntityMeta_1.entityMetaSchema.properties), exports.extensionDataSchema.properties), { subResourceReference: { type: "string", format: "mongo-id" }, projectReference: { type: "string", format: "mongo-id" }, organisationReference: { type: "string", format: "mongo-id" } })
80
+ properties: Object.assign(Object.assign(Object.assign({}, IEntityMeta_1.entityMetaSchema.properties), exports.extensionDataSchema.properties), { subResourceReference: { type: "string", format: "mongo-id" }, knowledge: { type: "array", items: IKnowledgeDescriptor_1.knowledgeDescriptorSchema }, projectReference: { type: "string", format: "mongo-id" }, organisationReference: { type: "string", format: "mongo-id" } })
82
81
  };
83
82
  //# sourceMappingURL=IExtension.js.map
@@ -0,0 +1,45 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.knowledgeDescriptorSchema = exports.knowledgeFieldSchema = exports.knowledgeFieldTypes = void 0;
4
+ const INodeDescriptorSet_1 = require("./INodeDescriptorSet");
5
+ exports.knowledgeFieldTypes = [
6
+ "text",
7
+ "rule",
8
+ "json",
9
+ "checkbox",
10
+ "time",
11
+ "date",
12
+ "datetime",
13
+ "select",
14
+ "xml",
15
+ "textArray",
16
+ "chipInput",
17
+ "toggle",
18
+ "slider",
19
+ "number",
20
+ "daterange",
21
+ "connection",
22
+ "condition",
23
+ "description",
24
+ ];
25
+ exports.knowledgeFieldSchema = {
26
+ title: "knowledgeFieldSchema",
27
+ type: "object",
28
+ additionalProperties: false,
29
+ properties: Object.assign(Object.assign({}, INodeDescriptorSet_1.nodeFieldSchema.properties), { type: { type: "string", enum: [...exports.knowledgeFieldTypes] }, key: { type: "string", minLength: 1, maxLength: 200 } }),
30
+ };
31
+ const { type, summary, defaultLabel, sections, form } = INodeDescriptorSet_1.nodeDescriptorSchema.properties;
32
+ exports.knowledgeDescriptorSchema = {
33
+ title: "knowledgeDescriptorSchema",
34
+ type: "object",
35
+ additionalProperties: false,
36
+ properties: {
37
+ type,
38
+ label: defaultLabel,
39
+ summary,
40
+ sections,
41
+ form,
42
+ fields: { type: "array", items: exports.knowledgeFieldSchema },
43
+ },
44
+ };
45
+ //# sourceMappingURL=IKnowledgeDescriptor.js.map
@@ -0,0 +1,9 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.SuccessCriterionType = void 0;
4
+ var SuccessCriterionType;
5
+ (function (SuccessCriterionType) {
6
+ SuccessCriterionType["TEXT"] = "text";
7
+ SuccessCriterionType["GOAL_COMPLETED"] = "goalCompleted";
8
+ })(SuccessCriterionType = exports.SuccessCriterionType || (exports.SuccessCriterionType = {}));
9
+ //# sourceMappingURL=ISimulation.js.map
@@ -121,6 +121,7 @@ exports.arrayTSnapshottableResourceType = [
121
121
  "playbook",
122
122
  "slotFiller",
123
123
  "snippet",
124
+ "simulation",
124
125
  ];
125
126
  exports.arrayTChartableResourceType = ["flow"];
126
127
  exports.resourceTypes = [...exports.arrayTResourceType];
@@ -177,6 +178,7 @@ exports.packageableResourceTypes = [
177
178
  "nluconnector",
178
179
  "playbook",
179
180
  "snippet",
181
+ "simulation",
180
182
  ];
181
183
  exports.primaryResourceTypes = [
182
184
  "aiAgent",
@@ -194,6 +196,7 @@ exports.primaryResourceTypes = [
194
196
  "playbook",
195
197
  "snippet",
196
198
  "handoverProvider",
199
+ "simulation",
197
200
  ];
198
201
  exports.pinnableResourceTypes = [
199
202
  "project"
@@ -0,0 +1,3 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ //# sourceMappingURL=IReadFlowChartAiAgentsRest_2_0.js.map
@@ -0,0 +1,3 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ //# sourceMappingURL=IGenerateBulkPersonaRest_2_0.js.map
@@ -0,0 +1,3 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ //# sourceMappingURL=IGeneratePersonaRest_2_0.js.map
@@ -0,0 +1,3 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ //# sourceMappingURL=IGetPersonaOptionsRest_2_0.js.map
@@ -0,0 +1,3 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ //# sourceMappingURL=IRegeneratePersonaFieldRest_2_0.js.map
@@ -1,6 +1,6 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.ESentimentTypeRest_2_0 = exports.ESuccessCriteriaTypeRest_2_0 = exports.ETurnTypeRest_2_0 = void 0;
3
+ exports.ESentimentTypeRest_2_0 = exports.SuccessCriterionType_2_0 = exports.ESuccessCriteriaTypeRest_2_0 = exports.ETurnTypeRest_2_0 = void 0;
4
4
  var ETurnTypeRest_2_0;
5
5
  (function (ETurnTypeRest_2_0) {
6
6
  ETurnTypeRest_2_0["INPUT"] = "input";
@@ -10,6 +10,11 @@ var ESuccessCriteriaTypeRest_2_0;
10
10
  (function (ESuccessCriteriaTypeRest_2_0) {
11
11
  ESuccessCriteriaTypeRest_2_0["TEXT"] = "text";
12
12
  })(ESuccessCriteriaTypeRest_2_0 = exports.ESuccessCriteriaTypeRest_2_0 || (exports.ESuccessCriteriaTypeRest_2_0 = {}));
13
+ var SuccessCriterionType_2_0;
14
+ (function (SuccessCriterionType_2_0) {
15
+ SuccessCriterionType_2_0["TEXT"] = "text";
16
+ SuccessCriterionType_2_0["GOAL_COMPLETED"] = "goalCompleted";
17
+ })(SuccessCriterionType_2_0 = exports.SuccessCriterionType_2_0 || (exports.SuccessCriterionType_2_0 = {}));
13
18
  var ESentimentTypeRest_2_0;
14
19
  (function (ESentimentTypeRest_2_0) {
15
20
  ESentimentTypeRest_2_0["POSITIVE"] = "POSITIVE";
@@ -17,6 +17,7 @@ exports.availablePermissions = exports.projectWidePermissions = exports.organisa
17
17
  * - userDetails
18
18
  * - users
19
19
  * - voiceGatewayAccount
20
+ * - opsCenter
20
21
  */
21
22
  exports.organisationWidePermissions = [
22
23
  "analyticsOdata",
@@ -28,6 +29,7 @@ exports.organisationWidePermissions = [
28
29
  "userDetails",
29
30
  "users",
30
31
  "voiceGatewayAccount",
32
+ "opsCenter",
31
33
  ];
32
34
  /**
33
35
  * @openapi
@@ -23,6 +23,7 @@ exports.projectRolesWithSpecialOrgRights = exports.availableRoles = exports.proj
23
23
  * - userManager
24
24
  * - userDetailsViewer
25
25
  * - voiceGatewayUser
26
+ * - opsCenterUser
26
27
  */
27
28
  exports.organisationWideRoles = [
28
29
  "admin",
@@ -40,6 +41,8 @@ exports.organisationWideRoles = [
40
41
  "userManager",
41
42
  "userDetailsViewer",
42
43
  "voiceGatewayUser",
44
+ "autoDialerUser",
45
+ "opsCenterUser",
43
46
  ];
44
47
  /**
45
48
  * @openapi
@@ -1,7 +1,7 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.identityProviderSchema = exports.oidcIdentityProviderDataSchema = exports.samlIdentityProviderDataSchema = exports.operations = exports.availableRoles = exports.projectWideRoles = exports.organisationWideRoles = exports.availablePermissions = exports.COMPUTED_ACL_HASH_VERSION = void 0;
4
- exports.COMPUTED_ACL_HASH_VERSION = "v20";
4
+ exports.COMPUTED_ACL_HASH_VERSION = "v23";
5
5
  var IPermission_1 = require("./IPermission");
6
6
  Object.defineProperty(exports, "availablePermissions", { enumerable: true, get: function () { return IPermission_1.availablePermissions; } });
7
7
  var IRole_1 = require("./IRole");
@@ -619,7 +619,10 @@ export const ResourcesAPIGroup_2_0 = (instance) => {
619
619
  generateDesignTimeLLMOutput: (_a) => {
620
620
  var { projectId } = _a, args = __rest(_a, ["projectId"]);
621
621
  return GenericAPIFn(`/new/v2.0/projects/${projectId}/generate-output/design-time-llm`, "POST", self)(args);
622
- }
622
+ },
623
+ readFlowChartAiAgents: ({ flowId, preferredLocaleId }, options) => GenericAPIFn(`/new/v2.0/flows/${flowId}/chart/nodes/aiagents?${stringifyQuery({
624
+ preferredLocaleId
625
+ })}`, "GET", self)(undefined, options)
623
626
  };
624
627
  };
625
628
  //# sourceMappingURL=ResourcesAPIGroup_2_0.js.map
@@ -38,7 +38,11 @@ export function SimulationAPIGroup_2_0(instance) {
38
38
  readSimulationRun: (_a, options) => {
39
39
  var { simulationReference, simulationRunBatchReference, simulationRunReference } = _a, args = __rest(_a, ["simulationReference", "simulationRunBatchReference", "simulationRunReference"]);
40
40
  return GenericAPIFn(`/testing/beta/simulations/${simulationReference}/batches/${simulationRunBatchReference}/runs/${simulationRunReference}?${stringifyQuery(args)}`, "GET", self)(undefined, options);
41
- }
41
+ },
42
+ getPersonaOptions: (args, options) => GenericAPIFn("/testing/beta/personas/options", "POST", self)(args, options),
43
+ generatePersona: (args, options) => GenericAPIFn("/testing/beta/personas/generate", "POST", self)(args, options),
44
+ regeneratePersonaField: (args, options) => GenericAPIFn("/testing/beta/personas/regenerate-field", "POST", self)(args, options),
45
+ generateBulkPersona: (args, options) => GenericAPIFn("/testing/beta/personas/generate-bulk", "POST", self)(args, options)
42
46
  };
43
47
  }
44
48
  //# sourceMappingURL=SimulationAPIGroup_2_0.js.map
@@ -6,6 +6,12 @@ export const AMAZON_STORAGE_PROVIDER_CONNECTION = {
6
6
  { fieldName: "secretAccessKey", label: "UI__CONNECTION_EDITOR__FIELD_SECRET_ACCESS_KEY" },
7
7
  { fieldName: "region", label: "UI__CONNECTION_EDITOR__FIELD_REGION" },
8
8
  { fieldName: "bucketName", label: "UI__CONNECTION_EDITOR__FIELD_BUCKET_NAME" },
9
- ]
9
+ {
10
+ fieldName: "customUrl",
11
+ label: "UI__CONNECTION_EDITOR__FIELD_CUSTOM_URL",
12
+ required: false,
13
+ description: "UI__CONNECTION_EDITOR__FIELD_CUSTOM_URL_AWS__DESCRIPTION"
14
+ },
15
+ ],
10
16
  };
11
17
  //# sourceMappingURL=amazonStorageProviderConnection.js.map
@@ -639,10 +639,37 @@ New: `;
639
639
  message: (error === null || error === void 0 ? void 0 : error.message) || error,
640
640
  };
641
641
  api.logDebugError(JSON.stringify(compactError, undefined, 2), "Search Extract Output: Error");
642
- api.emitToOpsCenter({
643
- subComponent: "KnowledgeAIQueries",
644
- title: error === null || error === void 0 ? void 0 : error.message
645
- });
642
+ if (!(error instanceof InternalServerError)) {
643
+ const metadata = api.getMetadata();
644
+ api.emitToOpsCenter({
645
+ projectId: metadata === null || metadata === void 0 ? void 0 : metadata.projectId,
646
+ subComponent: "KnowledgeAIQueries",
647
+ title: error === null || error === void 0 ? void 0 : error.message,
648
+ errorCode: "ERR_FLOW_006",
649
+ metadata: {
650
+ // extra metadata for the error is enriched in service-sentinel
651
+ snapshot: {
652
+ id: metadata === null || metadata === void 0 ? void 0 : metadata.snapshotId,
653
+ name: metadata === null || metadata === void 0 ? void 0 : metadata.snapshotName,
654
+ },
655
+ flow: {
656
+ referenceId: cognigy.flowReferenceId,
657
+ name: input.flowName,
658
+ },
659
+ node: {
660
+ referenceId: nodeId
661
+ },
662
+ knowledgeStore: {
663
+ referenceId: knowledgeStoreId,
664
+ },
665
+ locale: {
666
+ referenceId: metadata === null || metadata === void 0 ? void 0 : metadata.localeReferenceId,
667
+ name: metadata === null || metadata === void 0 ? void 0 : metadata.localeName,
668
+ },
669
+ },
670
+ isSnapshotError: !!(metadata === null || metadata === void 0 ? void 0 : metadata.snapshotId),
671
+ });
672
+ }
646
673
  if ((_m = error === null || error === void 0 ? void 0 : error.originalErrorDetails) === null || _m === void 0 ? void 0 : _m.code) {
647
674
  compactError["code"] = error.originalErrorDetails.code;
648
675
  }
@@ -125,8 +125,9 @@ export const createLastUserInputString = (lastConversationEntries, turnLimit = 1
125
125
  * @param debugLogTokenCount whether to log the token count
126
126
  * @param debugLogRequestAndCompletion whether to log the request and completion
127
127
  * @param cognigy the cognigy object (input, api, etc)
128
+ * @param nodeType the type of the node (optional)
128
129
  */
129
- export const writeLLMDebugLogs = (label, prompt, response, debugLogTokenCount, debugLogRequestAndCompletion, cognigy) => __awaiter(void 0, void 0, void 0, function* () {
130
+ export const writeLLMDebugLogs = (label, prompt, response, debugLogTokenCount, debugLogRequestAndCompletion, cognigy, nodeType) => __awaiter(void 0, void 0, void 0, function* () {
130
131
  var _a, _b, _c, _d;
131
132
  const { api, input } = cognigy;
132
133
  if (input.endpointType !== "adminconsole" && !api.getMetadata().isFollowSessionActive) {
@@ -143,14 +144,21 @@ export const writeLLMDebugLogs = (label, prompt, response, debugLogTokenCount, d
143
144
  if (debugLogTokenCount) {
144
145
  if (prompt) {
145
146
  const requestTokens = (_a = response === null || response === void 0 ? void 0 : response.tokenUsage) === null || _a === void 0 ? void 0 : _a.inputTokens;
146
- requestTokenMessage = ` (${requestTokens} Tokens)`;
147
+ requestTokenMessage = ` (${nodeType === "llmPromptV2" ? "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__FULL_REQUEST: " : ""}${requestTokens} Tokens)`;
147
148
  }
148
149
  if (response) {
149
150
  const completionTokens = (_b = response === null || response === void 0 ? void 0 : response.tokenUsage) === null || _b === void 0 ? void 0 : _b.outputTokens;
150
151
  completionTokenMessage = ` (${completionTokens} Tokens)`;
151
152
  }
152
153
  }
153
- api.logDebugMessage(`UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__REQUEST${requestTokenMessage}:<br>${prompt}<br><br>UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__COMPLETION${completionTokenMessage}:<br>${responseOutputFormatted}`, "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER");
154
+ let inputLabelKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__REQUEST";
155
+ let headerKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER";
156
+ if (nodeType === "llmPromptV2") {
157
+ inputLabelKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__SYSTEM_PROMPT";
158
+ headerKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER_WITH_SYSTEM_PROMPT";
159
+ }
160
+ ;
161
+ api.logDebugMessage(`${inputLabelKey}${requestTokenMessage}:<br>${prompt}<br><br>UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__COMPLETION${completionTokenMessage}:<br>${responseOutputFormatted}`, headerKey);
154
162
  }
155
163
  catch (err) { }
156
164
  }
@@ -585,6 +585,37 @@ export const GPT_PROMPT = createNodeDescriptor({
585
585
  const errorResponse = {
586
586
  error: compactError,
587
587
  };
588
+ if (!(error instanceof InternalServerError)) {
589
+ const metadata = api.getMetadata();
590
+ api.emitToOpsCenter({
591
+ projectId: metadata === null || metadata === void 0 ? void 0 : metadata.projectId,
592
+ subComponent: "LargeLanguageModelCalls",
593
+ title: error === null || error === void 0 ? void 0 : error.message,
594
+ errorCode: "ERR_FLOW_005",
595
+ isSnapshotError: !!(metadata === null || metadata === void 0 ? void 0 : metadata.snapshotId),
596
+ metadata: {
597
+ // extra metadata for the error is enriched in service-sentinel
598
+ snapshot: {
599
+ id: metadata === null || metadata === void 0 ? void 0 : metadata.snapshotId,
600
+ name: metadata === null || metadata === void 0 ? void 0 : metadata.snapshotName,
601
+ },
602
+ flow: {
603
+ referenceId: cognigy.flowReferenceId,
604
+ name: input.flowName,
605
+ },
606
+ node: {
607
+ referenceId: nodeId
608
+ },
609
+ llm: {
610
+ referenceId: llmProviderReferenceId,
611
+ },
612
+ locale: {
613
+ referenceId: metadata === null || metadata === void 0 ? void 0 : metadata.localeReferenceId,
614
+ name: metadata === null || metadata === void 0 ? void 0 : metadata.localeName,
615
+ },
616
+ }
617
+ });
618
+ }
588
619
  // add error to context or input
589
620
  switch (storeLocation) {
590
621
  case "context":
@@ -499,6 +499,13 @@ export const AI_AGENT_JOB = createNodeDescriptor({
499
499
  description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_CONFIG__DESCRIPTION",
500
500
  defaultValue: true
501
501
  },
502
+ {
503
+ key: "debugLogLLMLatency",
504
+ label: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_LLM_LATENCY__LABEL",
505
+ type: "toggle",
506
+ description: "UI__NODE_EDITOR__SERVICE__AI_AGENT_JOB__FIELDS__LOG_LLM_LATENCY__DESCRIPTION",
507
+ defaultValue: false
508
+ },
502
509
  {
503
510
  key: "storeLocation",
504
511
  type: "select",
@@ -818,7 +825,8 @@ export const AI_AGENT_JOB = createNodeDescriptor({
818
825
  "debugResult",
819
826
  "debugLogTokenCount",
820
827
  "debugLogSystemPrompt",
821
- "debugLogToolDefinitions"
828
+ "debugLogToolDefinitions",
829
+ "debugLogLLMLatency"
822
830
  ],
823
831
  }
824
832
  ],
@@ -839,9 +847,9 @@ export const AI_AGENT_JOB = createNodeDescriptor({
839
847
  ],
840
848
  tags: ["ai", "aiAgent"],
841
849
  function: ({ cognigy, config, childConfigs, nodeId }) => __awaiter(void 0, void 0, void 0, function* () {
842
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20;
850
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21;
843
851
  const { api, context, input, profile, flowReferenceId } = cognigy;
844
- const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
852
+ const { aiAgent, llmProviderReferenceId, name: jobName, description: jobDescription, instructions: jobInstructions, outputImmediately, toolChoice, useStrict, memoryType, selectedProfileFields, memoryContextInjection, knowledgeSearchBehavior, knowledgeSearchTags, knowledgeSearchTagsFilterOp, knowledgeSearchAiAgentKnowledge, knowledgeSearchJobKnowledge, knowledgeSearchJobStore, knowledgeSearchGenerateSearchPrompt, knowledgeSearchTopK, timeoutInMs, maxTokens, temperature, logErrorToSystem, storeErrorInInput, errorHandling, errorHandlingGotoTarget, errorMessage, debugConfig, debugLogTokenCount, debugLogSystemPrompt, debugLogToolDefinitions, debugResult, debugLogLLMLatency, storeLocation, contextKey, inputKey, streamStoreCopyInInput, streamStopTokens, processImages, transcriptImageHandling, sessionParams } = config;
845
853
  try {
846
854
  if (!aiAgent) {
847
855
  throw new Error("Could not resolve AI Agent reference in AI Agent Node");
@@ -889,7 +897,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
889
897
  throw new Error(`[VG2] Error on AI Agent Job node. Error message: ${error.message}`);
890
898
  }
891
899
  }
892
- const _21 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _21, cleanedProfile = __rest(_21, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
900
+ const _22 = profile, { profileId, accepted_gdpr, prevent_data_collection, privacy_policy } = _22, cleanedProfile = __rest(_22, ["profileId", "accepted_gdpr", "prevent_data_collection", "privacy_policy"]);
893
901
  const userMemory = getUserMemory(memoryType, selectedProfileFields, aiAgent, cleanedProfile);
894
902
  /**
895
903
  * ----- Knowledge Search Section -----
@@ -1139,6 +1147,10 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1139
1147
  var _a;
1140
1148
  text = isStreamingChannel ? text : text.trim();
1141
1149
  if (text) {
1150
+ // Record first output time for debugging if not already recorded
1151
+ if (debugLogLLMLatency && firstOutputTime === null) {
1152
+ firstOutputTime = Date.now();
1153
+ }
1142
1154
  // if we got text, we output it, but prevent it from being added to the transcript
1143
1155
  (_a = api.output) === null || _a === void 0 ? void 0 : _a.call(api, text, {
1144
1156
  _cognigy: {
@@ -1161,13 +1173,38 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1161
1173
  // Set understood to true so that an AI Agent interaction doesn't look false in our analytics
1162
1174
  (_1 = api.setAnalyticsData) === null || _1 === void 0 ? void 0 : _1.call(api, "understood", "true");
1163
1175
  input.understood = true;
1176
+ // Start measuring LLM latency and time to first output if debug flag is enabled
1177
+ const llmStartTime = debugLogLLMLatency ? Date.now() : 0;
1178
+ let firstOutputTime = null;
1164
1179
  const fullLlmResult = yield ((_2 = api.runGenerativeAIPrompt) === null || _2 === void 0 ? void 0 : _2.call(api, llmPromptOptions, "aiAgent"));
1180
+ // End measuring times and log if debug flag is enabled
1181
+ if (debugLogLLMLatency) {
1182
+ const llmEndTime = Date.now();
1183
+ const debugMessages = [];
1184
+ const llmLatencyMs = llmEndTime - llmStartTime;
1185
+ let timeToFirstOutputLabel;
1186
+ if (fullLlmResult.finishReason === "tool_calls" && fullLlmResult.toolCalls.length > 0) {
1187
+ timeToFirstOutputLabel = " - (tool call)";
1188
+ }
1189
+ else if (firstOutputTime === null) {
1190
+ timeToFirstOutputLabel = " - (no output)";
1191
+ }
1192
+ else {
1193
+ firstOutputTime = firstOutputTime || llmEndTime;
1194
+ timeToFirstOutputLabel = `${firstOutputTime - llmStartTime}ms`;
1195
+ }
1196
+ if (storeLocation === "stream") {
1197
+ debugMessages.push(`UI__DEBUG_MODE__AI_AGENT_JOB__TIME_TO_FIRST_OUTPUT__LABEL: ${timeToFirstOutputLabel}`);
1198
+ }
1199
+ debugMessages.push(`UI__DEBUG_MODE__AI_AGENT_JOB__LLM_LATENCY__LABEL: ${llmLatencyMs}ms`);
1200
+ (_3 = api.logDebugMessage) === null || _3 === void 0 ? void 0 : _3.call(api, debugMessages.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TIMING__HEADER");
1201
+ }
1165
1202
  const { messages } = fullLlmResult, llmResult = __rest(fullLlmResult, ["messages"]);
1166
1203
  const llmProvider = llmResult === null || llmResult === void 0 ? void 0 : llmResult.provider;
1167
1204
  const tokenUsage = fullLlmResult.tokenUsage;
1168
1205
  // Send optional debug message with token usage
1169
1206
  if (debugLogTokenCount && tokenUsage) {
1170
- (_3 = api.logDebugMessage) === null || _3 === void 0 ? void 0 : _3.call(api, tokenUsage, "UI__DEBUG_MODE__AI_AGENT_JOB__TOKEN_USAGE__HEADER");
1207
+ (_4 = api.logDebugMessage) === null || _4 === void 0 ? void 0 : _4.call(api, tokenUsage, "UI__DEBUG_MODE__AI_AGENT_JOB__TOKEN_USAGE__HEADER");
1171
1208
  }
1172
1209
  // Identify if the result is a tool call
1173
1210
  // If response is a tool call, set next node for Tools
@@ -1182,7 +1219,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1182
1219
  isMcpToolCall = true;
1183
1220
  }
1184
1221
  if (mainToolCall.function.name !== "retrieve_knowledge" && toolChild === undefined) {
1185
- (_4 = api.logDebugError) === null || _4 === void 0 ? void 0 : _4.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
1222
+ (_5 = api.logDebugError) === null || _5 === void 0 ? void 0 : _5.call(api, `UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__BODY <b>${mainToolCall.function.name}</b>`, "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__ERROR__HEADER");
1186
1223
  }
1187
1224
  // Add last tool call to session state for loading it from Tool Answer Node
1188
1225
  api.updateSessionStateValues({
@@ -1190,21 +1227,21 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1190
1227
  flow: flowReferenceId,
1191
1228
  node: nodeId,
1192
1229
  } }, (isMcpToolCall && {
1193
- mcpServerUrl: (_5 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _5 === void 0 ? void 0 : _5.mcpServerUrl,
1194
- timeout: (_6 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _6 === void 0 ? void 0 : _6.timeout,
1230
+ mcpServerUrl: (_6 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _6 === void 0 ? void 0 : _6.mcpServerUrl,
1231
+ timeout: (_7 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _7 === void 0 ? void 0 : _7.timeout,
1195
1232
  mcpToolNode: toolChild === null || toolChild === void 0 ? void 0 : toolChild.id,
1196
1233
  })), { toolCall: mainToolCall }),
1197
1234
  });
1198
1235
  // if there are any parameters/arguments, add them to the input slots
1199
1236
  if (mainToolCall.function.arguments) {
1200
- input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_8 = (_7 = input.aiAgent) === null || _7 === void 0 ? void 0 : _7.toolArgs) !== null && _8 !== void 0 ? _8 : {}), mainToolCall.function.arguments) });
1237
+ input.aiAgent = Object.assign(Object.assign({}, input.aiAgent), { toolArgs: Object.assign(Object.assign({}, (_9 = (_8 = input.aiAgent) === null || _8 === void 0 ? void 0 : _8.toolArgs) !== null && _9 !== void 0 ? _9 : {}), mainToolCall.function.arguments) });
1201
1238
  }
1202
1239
  // Debug Message for Tool Calls, configured in the Tool Node
1203
- if ((_9 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _9 === void 0 ? void 0 : _9.debugMessage) {
1240
+ if ((_10 = toolChild === null || toolChild === void 0 ? void 0 : toolChild.config) === null || _10 === void 0 ? void 0 : _10.debugMessage) {
1204
1241
  const toolId = isMcpToolCall ? mainToolCall.function.name : api.parseCognigyScriptText(toolChild.config.toolId);
1205
1242
  const messageLines = [`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER:</b> ${toolId}`];
1206
1243
  // Arguments / Parameters Slots
1207
- const slots = ((_10 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _10 === void 0 ? void 0 : _10.arguments) && Object.keys(mainToolCall.function.arguments);
1244
+ const slots = ((_11 = mainToolCall === null || mainToolCall === void 0 ? void 0 : mainToolCall.function) === null || _11 === void 0 ? void 0 : _11.arguments) && Object.keys(mainToolCall.function.arguments);
1208
1245
  const hasSlots = slots && slots.length > 0;
1209
1246
  messageLines.push(`<b>UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__SLOTS</b>${hasSlots ? "" : " -"}`);
1210
1247
  if (hasSlots) {
@@ -1219,7 +1256,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1219
1256
  messageLines.push(`- ${slot}: ${slotValueAsString}`);
1220
1257
  });
1221
1258
  }
1222
- (_11 = api.logDebugMessage) === null || _11 === void 0 ? void 0 : _11.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
1259
+ (_12 = api.logDebugMessage) === null || _12 === void 0 ? void 0 : _12.call(api, messageLines.join("\n"), "UI__DEBUG_MODE__AI_AGENT_JOB__TOOL_CALL__DEBUG_MESSAGE__HEADER");
1223
1260
  }
1224
1261
  if (toolChild) {
1225
1262
  api.setNextNode(toolChild.id);
@@ -1244,11 +1281,11 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1244
1281
  }
1245
1282
  // Optionally output the result immediately
1246
1283
  if (llmResult.result && outputImmediately && !llmPromptOptions.stream) {
1247
- yield ((_12 = api.output) === null || _12 === void 0 ? void 0 : _12.call(api, llmResult.result, {}));
1284
+ yield ((_13 = api.output) === null || _13 === void 0 ? void 0 : _13.call(api, llmResult.result, {}));
1248
1285
  }
1249
1286
  else if (llmResult.finishReason && llmPromptOptions.stream) {
1250
1287
  // send the finishReason as last output for a stream
1251
- (_13 = api.output) === null || _13 === void 0 ? void 0 : _13.call(api, "", {
1288
+ (_14 = api.output) === null || _14 === void 0 ? void 0 : _14.call(api, "", {
1252
1289
  _cognigy: {
1253
1290
  _preventTranscript: true,
1254
1291
  _messageId,
@@ -1271,7 +1308,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1271
1308
  }
1272
1309
  // Add response to Cognigy Input/Context for further usage
1273
1310
  if (storeLocation === "context") {
1274
- (_14 = api.addToContext) === null || _14 === void 0 ? void 0 : _14.call(api, contextKey, llmResult, "simple");
1311
+ (_15 = api.addToContext) === null || _15 === void 0 ? void 0 : _15.call(api, contextKey, llmResult, "simple");
1275
1312
  }
1276
1313
  else if (storeLocation === "input") {
1277
1314
  api.addToInput(inputKey, llmResult);
@@ -1284,14 +1321,14 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1284
1321
  const errorDetails = {
1285
1322
  name: (error === null || error === void 0 ? void 0 : error.name) || "Error",
1286
1323
  code: (error === null || error === void 0 ? void 0 : error.code) || (error === null || error === void 0 ? void 0 : error.httpStatusCode),
1287
- message: (error === null || error === void 0 ? void 0 : error.message) || ((_15 = error.originalErrorDetails) === null || _15 === void 0 ? void 0 : _15.message),
1324
+ message: (error === null || error === void 0 ? void 0 : error.message) || ((_16 = error.originalErrorDetails) === null || _16 === void 0 ? void 0 : _16.message),
1288
1325
  };
1289
- (_16 = api.emitEvent) === null || _16 === void 0 ? void 0 : _16.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
1326
+ (_17 = api.emitEvent) === null || _17 === void 0 ? void 0 : _17.call(api, "nodeError", { nodeId, flowId: flowReferenceId, errorMessage: error });
1290
1327
  if (logErrorToSystem) {
1291
- (_17 = api.log) === null || _17 === void 0 ? void 0 : _17.call(api, "error", JSON.stringify(errorDetails));
1328
+ (_18 = api.log) === null || _18 === void 0 ? void 0 : _18.call(api, "error", JSON.stringify(errorDetails));
1292
1329
  }
1293
1330
  if (errorHandling !== "stop") {
1294
- (_18 = api.logDebugError) === null || _18 === void 0 ? void 0 : _18.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
1331
+ (_19 = api.logDebugError) === null || _19 === void 0 ? void 0 : _19.call(api, errorDetails.message + (errorDetails.code ? ` (error code: ${errorDetails.code})` : ""), errorDetails.name);
1295
1332
  }
1296
1333
  if (storeErrorInInput) {
1297
1334
  input.aiAgent = input.aiAgent || {};
@@ -1300,7 +1337,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1300
1337
  if (errorHandling === "continue") {
1301
1338
  // output the timeout message
1302
1339
  if (errorMessage) {
1303
- yield ((_19 = api.output) === null || _19 === void 0 ? void 0 : _19.call(api, errorMessage, null));
1340
+ yield ((_20 = api.output) === null || _20 === void 0 ? void 0 : _20.call(api, errorMessage, null));
1304
1341
  }
1305
1342
  // Set default node as next node
1306
1343
  const defaultChild = childConfigs.find(child => child.type === "aiAgentJobDefault");
@@ -1312,7 +1349,7 @@ export const AI_AGENT_JOB = createNodeDescriptor({
1312
1349
  if (!errorHandlingGotoTarget) {
1313
1350
  throw new Error("GoTo Target is required");
1314
1351
  }
1315
- if (!((_20 = api.checkThink) === null || _20 === void 0 ? void 0 : _20.call(api, nodeId))) {
1352
+ if (!((_21 = api.checkThink) === null || _21 === void 0 ? void 0 : _21.call(api, nodeId))) {
1316
1353
  api.resetNextNodes();
1317
1354
  yield api.executeFlow({
1318
1355
  flowNode: {