@cognigy/rest-api-client 2025.12.0 → 2025.14.0
This diff shows the content of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- package/CHANGELOG.md +10 -0
- package/build/apigroups/ResourcesAPIGroup_2_0.js +8 -1
- package/build/apigroups/SimulationAPIGroup_2_0.js +4 -1
- package/build/shared/charts/descriptors/analytics/trackGoal.js +3 -1
- package/build/shared/charts/descriptors/connectionNodes/internalStorageProviders/amazonStorageProviderConnection.js +7 -1
- package/build/shared/charts/descriptors/index.js +5 -0
- package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +10 -6
- package/build/shared/charts/descriptors/message/question/question.js +12 -1
- package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +11 -3
- package/build/shared/charts/descriptors/service/GPTPrompt.js +21 -1
- package/build/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +71 -175
- package/build/shared/charts/descriptors/service/aiAgent/aiAgentJobTool.js +2 -2
- package/build/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +175 -0
- package/build/shared/charts/descriptors/service/aiAgent/loadAiAgent.js +194 -0
- package/build/shared/charts/descriptors/service/handoverV2.js +1 -1
- package/build/shared/charts/descriptors/service/index.js +11 -1
- package/build/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +959 -0
- package/build/shared/charts/descriptors/service/llmPrompt/llmPromptDefault.js +31 -0
- package/build/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +196 -0
- package/build/shared/charts/descriptors/service/llmPrompt/llmPromptTool.js +139 -0
- package/build/shared/constants.js +1 -5
- package/build/shared/interfaces/debugEvents/IGoalCompletedEventPayload.js +3 -0
- package/build/shared/interfaces/debugEvents/TDebugEventType.js +1 -0
- package/build/shared/interfaces/generativeAI/IGenerativeAIModels.js +12 -1
- package/build/shared/interfaces/messageAPI/handover.js +6 -0
- package/build/shared/interfaces/resources/ISimulation.js +9 -0
- package/build/shared/interfaces/resources/TResourceType.js +3 -0
- package/build/shared/interfaces/resources/knowledgeStore/IKnowledgeChunk.js +2 -1
- package/build/shared/interfaces/resources/settings/IGenerativeAISettings.js +5 -18
- package/build/shared/interfaces/restAPI/operations/generateOutput/v2.0/index.js +3 -0
- package/build/shared/interfaces/restAPI/resources/chart/v2.0/IReadFlowChartAiAgentsRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/simulation/persona/IGeneratePersonaRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/simulation/persona/IGetPersonaOptionsRest_2_0.js +3 -0
- package/build/shared/interfaces/restAPI/simulation/persona/IRegeneratePersonaFieldRest_2_0.js +3 -0
- package/build/shared/interfaces/security/IPermission.js +4 -0
- package/build/shared/interfaces/security/IRole.js +5 -1
- package/build/shared/interfaces/security/index.js +1 -1
- package/dist/esm/apigroups/ResourcesAPIGroup_2_0.js +8 -1
- package/dist/esm/apigroups/SimulationAPIGroup_2_0.js +4 -1
- package/dist/esm/shared/charts/descriptors/analytics/trackGoal.js +3 -1
- package/dist/esm/shared/charts/descriptors/connectionNodes/internalStorageProviders/amazonStorageProviderConnection.js +7 -1
- package/dist/esm/shared/charts/descriptors/index.js +6 -1
- package/dist/esm/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js +10 -6
- package/dist/esm/shared/charts/descriptors/message/question/question.js +12 -1
- package/dist/esm/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js +11 -3
- package/dist/esm/shared/charts/descriptors/service/GPTPrompt.js +21 -1
- package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJob.js +72 -176
- package/dist/esm/shared/charts/descriptors/service/aiAgent/aiAgentJobTool.js +2 -2
- package/dist/esm/shared/charts/descriptors/service/aiAgent/helpers/createToolDefinitions.js +172 -0
- package/dist/esm/shared/charts/descriptors/service/aiAgent/loadAiAgent.js +192 -0
- package/dist/esm/shared/charts/descriptors/service/handoverV2.js +1 -1
- package/dist/esm/shared/charts/descriptors/service/index.js +5 -0
- package/dist/esm/shared/charts/descriptors/service/llmPrompt/LLMPromptV2.js +946 -0
- package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptDefault.js +28 -0
- package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptMCPTool.js +193 -0
- package/dist/esm/shared/charts/descriptors/service/llmPrompt/llmPromptTool.js +136 -0
- package/dist/esm/shared/constants.js +1 -5
- package/dist/esm/shared/interfaces/debugEvents/IGoalCompletedEventPayload.js +2 -0
- package/dist/esm/shared/interfaces/debugEvents/TDebugEventType.js +1 -0
- package/dist/esm/shared/interfaces/generativeAI/IGenerativeAIModels.js +12 -1
- package/dist/esm/shared/interfaces/messageAPI/handover.js +6 -0
- package/dist/esm/shared/interfaces/resources/ISimulation.js +6 -0
- package/dist/esm/shared/interfaces/resources/TResourceType.js +3 -0
- package/dist/esm/shared/interfaces/resources/knowledgeStore/IKnowledgeChunk.js +2 -1
- package/dist/esm/shared/interfaces/resources/settings/IGenerativeAISettings.js +4 -17
- package/dist/esm/shared/interfaces/restAPI/operations/generateOutput/v2.0/index.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/resources/chart/v2.0/IReadFlowChartAiAgentsRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/simulation/persona/IGeneratePersonaRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/simulation/persona/IGetPersonaOptionsRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/restAPI/simulation/persona/IRegeneratePersonaFieldRest_2_0.js +2 -0
- package/dist/esm/shared/interfaces/security/IPermission.js +4 -0
- package/dist/esm/shared/interfaces/security/IRole.js +5 -1
- package/dist/esm/shared/interfaces/security/index.js +1 -1
- package/package.json +1 -1
- package/types/index.d.ts +2093 -1927
package/CHANGELOG.md
CHANGED
@@ -1,3 +1,13 @@
+# 2025.14.0
+Released: July 08th, 2025
+
+Released state of package up to date with Cognigy.AI v2025.14.0
+
+# 2025.13.0
+Released: June 24th, 2025
+
+Released state of package up to date with Cognigy.AI v2025.13.0
+
 # 2025.12.0
 Released: June 10th, 2025
 
package/build/apigroups/ResourcesAPIGroup_2_0.js
CHANGED

@@ -628,7 +628,14 @@ const ResourcesAPIGroup_2_0 = (instance) => {
         generateNluScores: (_a) => {
             var { projectId } = _a, args = __rest(_a, ["projectId"]);
             return (0, GenericAPIFn_1.GenericAPIFn)(`/new/v2.0/projects/${projectId}/nlu/scores`, "POST", self)(args);
-        }
+        },
+        generateDesignTimeLLMOutput: (_a) => {
+            var { projectId } = _a, args = __rest(_a, ["projectId"]);
+            return (0, GenericAPIFn_1.GenericAPIFn)(`/new/v2.0/projects/${projectId}/generate-output/design-time-llm`, "POST", self)(args);
+        },
+        readFlowChartAiAgents: ({ flowId, preferredLocaleId }, options) => (0, GenericAPIFn_1.GenericAPIFn)(`/new/v2.0/flows/${flowId}/chart/nodes/output/aiagents?${(0, query_1.stringifyQuery)({
+            preferredLocaleId
+        })}`, "GET", self)(undefined, options)
     };
 };
 exports.ResourcesAPIGroup_2_0 = ResourcesAPIGroup_2_0;
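For orientation, a minimal consumer-side sketch of the two new resource calls. The declarations are stand-ins (the concrete request/response types live in types/index.d.ts, which this excerpt does not show); only the method names, verbs, and URL shapes come from the hunk above.

// Sketch only: stand-in declaration, the real factory lives in build/apigroups/ResourcesAPIGroup_2_0.js.
declare function ResourcesAPIGroup_2_0(instance: unknown): {
    generateDesignTimeLLMOutput(args: { projectId: string } & Record<string, unknown>): Promise<unknown>;
    readFlowChartAiAgents(args: { flowId: string; preferredLocaleId?: string }, options?: unknown): Promise<unknown>;
};
declare const instance: unknown; // the configured HTTP instance the API groups expect

async function resourcesDemo() {
    const resources = ResourcesAPIGroup_2_0(instance);
    // POST /new/v2.0/projects/:projectId/generate-output/design-time-llm
    // (everything besides projectId is forwarded as the request body)
    await resources.generateDesignTimeLLMOutput({ projectId: "my-project-id" });
    // GET /new/v2.0/flows/:flowId/chart/nodes/output/aiagents?preferredLocaleId=...
    await resources.readFlowChartAiAgents({ flowId: "my-flow-id", preferredLocaleId: "en-US" });
}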
package/build/apigroups/SimulationAPIGroup_2_0.js
CHANGED

@@ -51,7 +51,10 @@ function SimulationAPIGroup_2_0(instance) {
         readSimulationRun: (_a, options) => {
             var { simulationReference, simulationRunBatchReference, simulationRunReference } = _a, args = __rest(_a, ["simulationReference", "simulationRunBatchReference", "simulationRunReference"]);
             return (0, GenericAPIFn_1.GenericAPIFn)(`/testing/beta/simulations/${simulationReference}/batches/${simulationRunBatchReference}/runs/${simulationRunReference}?${(0, query_1.stringifyQuery)(args)}`, "GET", self)(undefined, options);
-        }
+        },
+        getPersonaOptions: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)("/testing/beta/personas/options", "POST", self)(args, options),
+        generatePersona: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)("/testing/beta/personas/generate", "POST", self)(args, options),
+        regeneratePersonaField: (args, options) => (0, GenericAPIFn_1.GenericAPIFn)("/testing/beta/personas/regenerate-field", "POST", self)(args, options)
     };
 }
 exports.SimulationAPIGroup_2_0 = SimulationAPIGroup_2_0;
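The three persona endpoints share the same shape: the args object becomes the POST body. A hedged usage sketch; the payload fields are illustrative, only the paths and verbs are confirmed above.

// Sketch only: stand-in declaration for the factory in build/apigroups/SimulationAPIGroup_2_0.js.
declare function SimulationAPIGroup_2_0(instance: unknown): {
    getPersonaOptions(args: Record<string, unknown>, options?: unknown): Promise<unknown>;
    generatePersona(args: Record<string, unknown>, options?: unknown): Promise<unknown>;
    regeneratePersonaField(args: Record<string, unknown>, options?: unknown): Promise<unknown>;
};
declare const instance: unknown;

async function personaDemo() {
    const simulation = SimulationAPIGroup_2_0(instance);
    // POST /testing/beta/personas/options
    const personaOptions = await simulation.getPersonaOptions({});
    // POST /testing/beta/personas/generate
    const persona = await simulation.generatePersona({ /* selections derived from personaOptions */ });
    // POST /testing/beta/personas/regenerate-field
    await simulation.regeneratePersonaField({ /* persona reference plus the field to redo */ });
    return { personaOptions, persona };
}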
package/build/shared/charts/descriptors/analytics/trackGoal.js
CHANGED

@@ -47,7 +47,7 @@ exports.TRACK_GOAL = (0, createNodeDescriptor_1.createNodeDescriptor)({
             return;
         }
         const sessionState = api.getSessionStateCopy();
-        const { selectedSteps, version, name, goalId, } = config.goal;
+        const { selectedSteps, referenceId, version, name, goalId, } = config.goal;
         const activeCycleIds = ((_a = sessionState.analytics) === null || _a === void 0 ? void 0 : _a.goalCycleIds) || {};
         let cycleId = activeCycleIds[goalId];
         const hasStartStep = selectedSteps.some(step => step.type === "start");
@@ -75,10 +75,12 @@ exports.TRACK_GOAL = (0, createNodeDescriptor_1.createNodeDescriptor)({
                 organisationId,
                 sessionId,
                 version,
+                referenceId,
                 timestamp: new Date(),
                 goalCycleId: cycleId,
                 goalId,
                 stepId: step.stepId,
+                stepType: step.type,
                 endpointUrlToken,
                 endpointName,
                 endpointType,
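Read together, the two hunks enrich the tracked goal-step event with the goal's referenceId and the step's type. As a shape sketch; the field names are confirmed by the diff, the types are assumptions.

// Field names from the diff above; types are assumed for illustration only.
interface GoalStepEventSketch {
    organisationId: string;
    sessionId: string;
    version: unknown;
    referenceId: string;    // new: destructured from config.goal
    timestamp: Date;
    goalCycleId: string;
    goalId: string;
    stepId: string;
    stepType: string;       // new: step.type, e.g. "start" as checked in the first hunk
    endpointUrlToken: string;
    endpointName: string;
    endpointType: string;
}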
package/build/shared/charts/descriptors/connectionNodes/internalStorageProviders/amazonStorageProviderConnection.js
CHANGED

@@ -9,6 +9,12 @@ exports.AMAZON_STORAGE_PROVIDER_CONNECTION = {
         { fieldName: "secretAccessKey", label: "UI__CONNECTION_EDITOR__FIELD_SECRET_ACCESS_KEY" },
         { fieldName: "region", label: "UI__CONNECTION_EDITOR__FIELD_REGION" },
         { fieldName: "bucketName", label: "UI__CONNECTION_EDITOR__FIELD_BUCKET_NAME" },
-
+        {
+            fieldName: "customUrl",
+            label: "UI__CONNECTION_EDITOR__FIELD_CUSTOM_URL",
+            required: false,
+            description: "UI__CONNECTION_EDITOR__FIELD_CUSTOM_URL_AWS__DESCRIPTION"
+        },
+    ],
 };
 //# sourceMappingURL=amazonStorageProviderConnection.js.map
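The new optional customUrl field lets a connection target a non-default endpoint, presumably for S3-compatible storage. A hypothetical connection payload matching the field list; accessKeyId is assumed from the unchanged context above the hunk, and all values are placeholders.

// Hypothetical values; only the field names mirror the descriptor above.
const amazonStorageConnection = {
    accessKeyId: "AKIA...",
    secretAccessKey: "...",
    region: "eu-central-1",
    bucketName: "my-bucket",
    customUrl: "https://s3-compatible.example.com", // optional, newly added
};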
package/build/shared/charts/descriptors/index.js
CHANGED

@@ -91,6 +91,10 @@ const nodes = [
     service_1.CHECK_AGENT_AVAILABILITY,
     service_1.HTTP_REQUEST,
     service_1.GPT_PROMPT,
+    service_1.LLM_PROMPT_V2,
+    service_1.LLM_PROMPT_DEFAULT,
+    service_1.LLM_PROMPT_MCP_TOOL,
+    service_1.LLM_PROMPT_TOOL,
     service_1.GPT_CONVERSATION,
     service_1.GPT_CONVERSATION_SUMMARY,
     service_1.LLM_ENTITY_EXTRACT,
@@ -146,6 +150,7 @@ if (process.env.DISABLE_FEATURE_TRANSCRIPT_MANAGER !== "true") {
     nodes.push(service_1.AI_AGENT_JOB_CALL_MCP_TOOL);
     nodes.push(service_1.AI_AGENT_TOOL_ANSWER);
     nodes.push(service_1.AI_AGENT_HANDOVER);
+    nodes.push(service_1.LOAD_AI_AGENT);
 }
 if (process.env.FEATURE_USE_COGNIGY_LIVE_AGENT === "true") {
     nodes.push(liveAgent_1.ASSIST_INFO);
package/build/shared/charts/descriptors/knowledgeSearch/searchExtractOutput.js
CHANGED

@@ -584,10 +584,12 @@ exports.SEARCH_EXTRACT_OUTPUT = (0, createNodeDescriptor_1.createNodeDescriptor)
         // as it doesn't make sense to check for follow ups in the first execution
         if (input.execution > 1) {
             // always remember the last thing the user said (needed later)
-            lastRoundTrip = (_b = cognigy
+            lastRoundTrip = (_b = cognigy
+                .lastConversationEntries) === null || _b === void 0 ? void 0 : _b.slice(1, followUpDetectionSteps + 1).reverse().map(entry => "- " + (entry.source === "user" ? "USER: " : "BOT: ") + entry.text).join("\n");
             // if follow up detection is set to 2 or more, we use the conversation transcript
             // as reference. Start at the second entry, because the first one is the current
-            const recentConversation = (_c = cognigy
+            const recentConversation = (_c = cognigy
+                .lastConversationEntries) === null || _c === void 0 ? void 0 : _c.slice(1, followUpDetectionSteps + 1).reverse().map(entry => "- " + (entry.source === "user" ? "USER: " : "BOT: ") + entry.text).join("\n");
             prompt = `Below is the transcript of a conversation:
 ${recentConversation}
 USER: ${searchString}
@@ -639,10 +641,12 @@ New: `;
             message: (error === null || error === void 0 ? void 0 : error.message) || error,
         };
         api.logDebugError(JSON.stringify(compactError, undefined, 2), "Search Extract Output: Error");
-
-
-
-
+        if (!(error instanceof errors_1.InternalServerError)) {
+            api.emitToOpsCenter({
+                subComponent: "KnowledgeAIQueries",
+                title: error === null || error === void 0 ? void 0 : error.message
+            });
+        }
         if ((_a = error === null || error === void 0 ? void 0 : error.originalErrorDetails) === null || _a === void 0 ? void 0 : _a.code) {
             compactError["code"] = error.originalErrorDetails.code;
         }
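Both changed assignments apply the same transformation to cognigy.lastConversationEntries: skip the current entry, take the next followUpDetectionSteps entries, restore chronological order, and render each as a "- USER:/BOT:" line. Restated as a standalone helper for readability; the entry shape is inferred from the mapping logic.

// Standalone restatement of the chained expression in the hunk above.
interface ConversationEntry { source: string; text: string; }

function formatRecentConversation(entries: ConversationEntry[], followUpDetectionSteps: number): string {
    return entries
        .slice(1, followUpDetectionSteps + 1)   // drop the current entry, keep the N previous ones
        .reverse()                              // back to chronological order
        .map(e => "- " + (e.source === "user" ? "USER: " : "BOT: ") + e.text)
        .join("\n");
}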
package/build/shared/charts/descriptors/message/question/question.js
CHANGED

@@ -309,7 +309,7 @@ exports.QUESTION = (0, createNodeDescriptor_1.createNodeDescriptor)({
         },
         {
             key: "repromptLLMPrompt",
-            type: "
+            type: "cognigyLLMText",
             label: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__REPROMPT_LLM_PROMPT__LABEL",
             description: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__REPROMPT_LLM_PROMPT__DESCRIPTION",
             defaultValue: `You are a chatbot that helps a user.
@@ -742,6 +742,17 @@ DO NOT talk about other topics. Do not offer general assistance.`,
             },
             defaultValue: true,
         },
+        {
+            key: "escalateAnswersRepeatHandoverMessage",
+            type: "toggle",
+            label: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__ESCALATE_ANSWERS_REPEAT_HANDOVER_MESSAGE__LABEL",
+            description: "UI__NODE_EDITOR__MESSAGE__QUESTION__QUESTION__FIELDS__ESCALATE_ANSWERS_REPEAT_HANDOVER_MESSAGE__DESCRIPTION",
+            defaultValue: false,
+            condition: {
+                key: "escalateAnswersAction",
+                value: "handover"
+            }
+        },
         {
             key: "escalateAnswersHandoverCancelIntent",
             type: "cognigyText",
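The new toggle uses the descriptor's condition mechanism, so it only appears while escalateAnswersAction is set to "handover". A simplified sketch of how such a condition gates field visibility; the real editor logic is not part of this diff.

// Illustrative evaluator; not the actual Cognigy editor implementation.
interface FieldCondition { key: string; value: unknown; }

function isFieldVisible(condition: FieldCondition | undefined, nodeConfig: Record<string, unknown>): boolean {
    // A field without a condition is always visible; otherwise the referenced
    // config value must match the condition's value exactly.
    return condition === undefined || nodeConfig[condition.key] === condition.value;
}

// isFieldVisible({ key: "escalateAnswersAction", value: "handover" }, { escalateAnswersAction: "handover" }) === true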
package/build/shared/charts/descriptors/nlu/generativeSlotFiller/prompt.js
CHANGED

@@ -133,8 +133,9 @@ exports.createLastUserInputString = createLastUserInputString;
  * @param debugLogTokenCount whether to log the token count
  * @param debugLogRequestAndCompletion whether to log the request and completion
  * @param cognigy the cognigy object (input, api, etc)
+ * @param nodeType the type of the node (optional)
  */
-const writeLLMDebugLogs = async (label, prompt, response, debugLogTokenCount, debugLogRequestAndCompletion, cognigy) => {
+const writeLLMDebugLogs = async (label, prompt, response, debugLogTokenCount, debugLogRequestAndCompletion, cognigy, nodeType) => {
     var _a, _b, _c, _d;
     const { api, input } = cognigy;
     if (input.endpointType !== "adminconsole" && !api.getMetadata().isFollowSessionActive) {
@@ -151,14 +152,21 @@ const writeLLMDebugLogs = async (label, prompt, response, debugLogTokenCount, de
         if (debugLogTokenCount) {
             if (prompt) {
                 const requestTokens = (_a = response === null || response === void 0 ? void 0 : response.tokenUsage) === null || _a === void 0 ? void 0 : _a.inputTokens;
-                requestTokenMessage = ` (${requestTokens} Tokens)`;
+                requestTokenMessage = ` (${nodeType === "llmPromptV2" ? "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__FULL_REQUEST: " : ""}${requestTokens} Tokens)`;
             }
             if (response) {
                 const completionTokens = (_b = response === null || response === void 0 ? void 0 : response.tokenUsage) === null || _b === void 0 ? void 0 : _b.outputTokens;
                 completionTokenMessage = ` (${completionTokens} Tokens)`;
             }
         }
-
+        let inputLabelKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__REQUEST";
+        let headerKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER";
+        if (nodeType === "llmPromptV2") {
+            inputLabelKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__SYSTEM_PROMPT";
+            headerKey = "UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__HEADER_WITH_SYSTEM_PROMPT";
+        }
+        ;
+        api.logDebugMessage(`${inputLabelKey}${requestTokenMessage}:<br>${prompt}<br><br>UI__DEBUG_MODE__LLM_PROMPT__FULL_REQUEST__COMPLETION${completionTokenMessage}:<br>${responseOutputFormatted}`, headerKey);
     }
     catch (err) { }
 }
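A call-site sketch for the extended signature: the existing arguments keep their meaning, and the new trailing nodeType switches the debug labels to the LLM Prompt v2 wording. The declarations are stand-ins, since the function is internal to prompt.js.

// Stand-in declarations; writeLLMDebugLogs itself is module-internal.
declare function writeLLMDebugLogs(
    label: string,
    prompt: string,
    response: { tokenUsage?: { inputTokens?: number; outputTokens?: number } },
    debugLogTokenCount: boolean,
    debugLogRequestAndCompletion: boolean,
    cognigy: unknown,
    nodeType?: string,
): Promise<void>;
declare const cognigy: unknown;
declare const response: { tokenUsage?: { inputTokens?: number; outputTokens?: number } };

async function debugLogDemo() {
    // Passing "llmPromptV2" selects the SYSTEM_PROMPT / HEADER_WITH_SYSTEM_PROMPT label keys.
    await writeLLMDebugLogs("LLM Prompt", "You are ...", response, true, true, cognigy, "llmPromptV2");
}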
package/build/shared/charts/descriptors/service/GPTPrompt.js
CHANGED

@@ -7,9 +7,10 @@ const logic_1 = require("../logic");
 const crypto_1 = require("crypto");
 const prompt_1 = require("../nlu/generativeSlotFiller/prompt");
 const errors_1 = require("../../../errors");
+const transcripts_1 = require("../../../interfaces/transcripts/transcripts");
 exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
     type: "completeText",
-    defaultLabel: "LLM Prompt",
+    defaultLabel: "LLM Prompt (legacy)",
     summary: "UI__NODE_EDITOR__SERVICE__GPT_PROMPT__DESCRIPTION",
     fields: [
         {
@@ -586,6 +587,12 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
             const errorResponse = {
                 error: compactError,
             };
+            if (!(error instanceof errors_1.InternalServerError)) {
+                api.emitToOpsCenter({
+                    subComponent: "LargeLanguageModelCalls",
+                    title: error === null || error === void 0 ? void 0 : error.message
+                });
+            }
             // add error to context or input
             switch (storeLocation) {
                 case "context":
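The guard added here is the same pattern as in searchExtractOutput.js above: internal server errors are kept out of the Ops Center, anything else is reported under a subcomponent. As an extracted sketch; the declarations are stand-ins for the real imports.

// Stand-ins for errors_1.InternalServerError and the node's api object.
declare class InternalServerError extends Error {}
declare const api: { emitToOpsCenter(event: { subComponent: string; title?: string }): void };

function reportToOpsCenter(error: unknown, subComponent: string): void {
    // Internal server errors are deliberately not surfaced to the Ops Center.
    if (!(error instanceof InternalServerError)) {
        api.emitToOpsCenter({ subComponent, title: (error as Error | undefined)?.message });
    }
}

// e.g. reportToOpsCenter(err, "LargeLanguageModelCalls");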
@@ -645,6 +652,7 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
             api.output(text, {
                 _cognigy: {
                     _messageId,
+                    _preventTranscript: true
                 }
             });
         }
@@ -702,6 +710,18 @@ exports.GPT_PROMPT = (0, createNodeDescriptor_1.createNodeDescriptor)({
             const resultToOutput = typeof ((response === null || response === void 0 ? void 0 : response.result) || response) === "object" ? JSON.stringify((response === null || response === void 0 ? void 0 : response.result) || response, undefined, 2) : (response === null || response === void 0 ? void 0 : response.result) || response;
             await api.output(resultToOutput, null);
         }
+        else if (storeLocation === "stream") {
+            const transcriptContent = {
+                role: transcripts_1.TranscriptRole.ASSISTANT,
+                type: transcripts_1.TranscriptEntryType.OUTPUT,
+                source: "assistant",
+                payload: {
+                    text: ((response === null || response === void 0 ? void 0 : response.result) || response),
+                    data: {},
+                }
+            };
+            await api.addTranscriptStep(transcriptContent);
+        }
         if (storeLocation === "stream" && responseToStore.finishReason) {
             // send the finishReason as last output for a stream
             (_a = api.output) === null || _a === void 0 ? void 0 : _a.call(api, "", {