@superblocksteam/vite-plugin-file-sync 2.0.43-next.9 → 2.0.49
This diff shows the content of publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- package/dist/ai-service/agent/subagents/apis/api-executor.d.ts +57 -0
- package/dist/ai-service/agent/subagents/apis/api-executor.d.ts.map +1 -0
- package/dist/ai-service/agent/subagents/apis/api-executor.js +284 -0
- package/dist/ai-service/agent/subagents/apis/api-executor.js.map +1 -0
- package/dist/ai-service/agent/subagents/apis/context.d.ts +12 -0
- package/dist/ai-service/agent/subagents/apis/context.d.ts.map +1 -0
- package/dist/ai-service/agent/subagents/apis/context.js +18 -0
- package/dist/ai-service/agent/subagents/apis/context.js.map +1 -0
- package/dist/ai-service/agent/subagents/apis/generate-api-source.d.ts +37 -31
- package/dist/ai-service/agent/subagents/apis/generate-api-source.d.ts.map +1 -1
- package/dist/ai-service/agent/subagents/apis/generate-api-source.js +355 -479
- package/dist/ai-service/agent/subagents/apis/generate-api-source.js.map +1 -1
- package/dist/ai-service/agent/subagents/apis/state.d.ts +40 -0
- package/dist/ai-service/agent/subagents/apis/state.d.ts.map +1 -0
- package/dist/ai-service/agent/subagents/apis/state.js +25 -0
- package/dist/ai-service/agent/subagents/apis/state.js.map +1 -0
- package/dist/ai-service/agent/subagents/apis/types.d.ts +5 -0
- package/dist/ai-service/agent/subagents/apis/types.d.ts.map +1 -0
- package/dist/ai-service/agent/subagents/apis/types.js +2 -0
- package/dist/ai-service/agent/subagents/apis/types.js.map +1 -0
- package/dist/ai-service/agent/tool-message-utils.d.ts.map +1 -1
- package/dist/ai-service/agent/tool-message-utils.js +8 -24
- package/dist/ai-service/agent/tool-message-utils.js.map +1 -1
- package/dist/ai-service/agent/tools/apis/build-api.d.ts +20 -0
- package/dist/ai-service/agent/tools/apis/build-api.d.ts.map +1 -0
- package/dist/ai-service/agent/tools/apis/build-api.js +170 -0
- package/dist/ai-service/agent/tools/apis/build-api.js.map +1 -0
- package/dist/ai-service/agent/tools/apis/finalize-api.d.ts +15 -0
- package/dist/ai-service/agent/tools/apis/finalize-api.d.ts.map +1 -0
- package/dist/ai-service/agent/tools/apis/finalize-api.js +103 -0
- package/dist/ai-service/agent/tools/apis/finalize-api.js.map +1 -0
- package/dist/ai-service/agent/tools/build-finalize.d.ts +1 -22
- package/dist/ai-service/agent/tools/build-finalize.d.ts.map +1 -1
- package/dist/ai-service/agent/tools/build-finalize.js +27 -18
- package/dist/ai-service/agent/tools/build-finalize.js.map +1 -1
- package/dist/ai-service/agent/tools/integrations/execute-request.d.ts +1 -1
- package/dist/ai-service/agent/tools/integrations/execute-request.d.ts.map +1 -1
- package/dist/ai-service/agent/tools/integrations/index.d.ts +1 -1
- package/dist/ai-service/agent/tools/integrations/index.d.ts.map +1 -1
- package/dist/ai-service/agent/tools/integrations/index.js +1 -1
- package/dist/ai-service/agent/tools/integrations/index.js.map +1 -1
- package/dist/ai-service/agent/tools/integrations/internal.d.ts +1 -1
- package/dist/ai-service/agent/tools/integrations/internal.d.ts.map +1 -1
- package/dist/ai-service/agent/tools/integrations/internal.js +12 -1
- package/dist/ai-service/agent/tools/integrations/internal.js.map +1 -1
- package/dist/ai-service/agent/tools/integrations/metadata.d.ts +1 -1
- package/dist/ai-service/agent/tools/integrations/metadata.d.ts.map +1 -1
- package/dist/ai-service/agent/tools/integrations/metadata.js +1 -1
- package/dist/ai-service/agent/tools/integrations/metadata.js.map +1 -1
- package/dist/ai-service/agent/tools/integrations/run-code.d.ts +1 -1
- package/dist/ai-service/agent/tools/integrations/run-code.d.ts.map +1 -1
- package/dist/ai-service/agent/tools/study-current-app-state.d.ts +1 -0
- package/dist/ai-service/agent/tools/study-current-app-state.d.ts.map +1 -1
- package/dist/ai-service/agent/tools2/access-control.d.ts.map +1 -1
- package/dist/ai-service/agent/tools2/access-control.js +5 -2
- package/dist/ai-service/agent/tools2/access-control.js.map +1 -1
- package/dist/ai-service/agent/tools2/registry.d.ts +2 -1
- package/dist/ai-service/agent/tools2/registry.d.ts.map +1 -1
- package/dist/ai-service/agent/tools2/registry.js +4 -4
- package/dist/ai-service/agent/tools2/registry.js.map +1 -1
- package/dist/ai-service/agent/tools2/types.d.ts +17 -3
- package/dist/ai-service/agent/tools2/types.d.ts.map +1 -1
- package/dist/ai-service/agent/tools2/types.js +21 -0
- package/dist/ai-service/agent/tools2/types.js.map +1 -1
- package/dist/ai-service/agent/utils.d.ts +1 -0
- package/dist/ai-service/agent/utils.d.ts.map +1 -1
- package/dist/ai-service/agent/utils.js +1 -0
- package/dist/ai-service/agent/utils.js.map +1 -1
- package/dist/ai-service/integrations/metadata/database.d.ts.map +1 -1
- package/dist/ai-service/integrations/metadata/database.js +61 -20
- package/dist/ai-service/integrations/metadata/database.js.map +1 -1
- package/dist/ai-service/integrations/metadata/databricks.d.ts.map +1 -1
- package/dist/ai-service/integrations/metadata/databricks.js +5 -5
- package/dist/ai-service/integrations/metadata/databricks.js.map +1 -1
- package/dist/ai-service/integrations/metadata/graphql-based.d.ts +2 -0
- package/dist/ai-service/integrations/metadata/graphql-based.d.ts.map +1 -1
- package/dist/ai-service/integrations/metadata/graphql-based.js +95 -14
- package/dist/ai-service/integrations/metadata/graphql-based.js.map +1 -1
- package/dist/ai-service/integrations/metadata/llm-utils.d.ts +24 -0
- package/dist/ai-service/integrations/metadata/llm-utils.d.ts.map +1 -0
- package/dist/ai-service/integrations/metadata/llm-utils.js +45 -0
- package/dist/ai-service/integrations/metadata/llm-utils.js.map +1 -0
- package/dist/ai-service/integrations/store.d.ts +5 -5
- package/dist/ai-service/integrations/store.d.ts.map +1 -1
- package/dist/ai-service/integrations/store.js +52 -53
- package/dist/ai-service/integrations/store.js.map +1 -1
- package/dist/ai-service/llm/context/constants.d.ts +7 -6
- package/dist/ai-service/llm/context/constants.d.ts.map +1 -1
- package/dist/ai-service/llm/context/constants.js +7 -6
- package/dist/ai-service/llm/context/constants.js.map +1 -1
- package/dist/ai-service/llm/context/context-handle.d.ts +106 -0
- package/dist/ai-service/llm/context/context-handle.d.ts.map +1 -0
- package/dist/ai-service/llm/context/context-handle.js +134 -0
- package/dist/ai-service/llm/context/context-handle.js.map +1 -0
- package/dist/ai-service/llm/context/context-lock.d.ts +144 -0
- package/dist/ai-service/llm/context/context-lock.d.ts.map +1 -0
- package/dist/ai-service/llm/context/context-lock.js +221 -0
- package/dist/ai-service/llm/context/context-lock.js.map +1 -0
- package/dist/ai-service/llm/context/context.d.ts +18 -19
- package/dist/ai-service/llm/context/context.d.ts.map +1 -1
- package/dist/ai-service/llm/context/context.js +78 -129
- package/dist/ai-service/llm/context/context.js.map +1 -1
- package/dist/ai-service/llm/context/index.d.ts +4 -0
- package/dist/ai-service/llm/context/index.d.ts.map +1 -1
- package/dist/ai-service/llm/context/index.js +5 -0
- package/dist/ai-service/llm/context/index.js.map +1 -1
- package/dist/ai-service/llm/context/internal-types.d.ts +0 -2
- package/dist/ai-service/llm/context/internal-types.d.ts.map +1 -1
- package/dist/ai-service/llm/context/internal-types.js.map +1 -1
- package/dist/ai-service/llm/context/levels/l1.d.ts.map +1 -1
- package/dist/ai-service/llm/context/levels/l1.js +3 -5
- package/dist/ai-service/llm/context/levels/l1.js.map +1 -1
- package/dist/ai-service/llm/context/manager.d.ts +60 -11
- package/dist/ai-service/llm/context/manager.d.ts.map +1 -1
- package/dist/ai-service/llm/context/manager.js +113 -37
- package/dist/ai-service/llm/context/manager.js.map +1 -1
- package/dist/ai-service/llm/context/utils/content-compaction.d.ts +2 -2
- package/dist/ai-service/llm/context/utils/content-compaction.d.ts.map +1 -1
- package/dist/ai-service/llm/context/utils/content-compaction.js +6 -3
- package/dist/ai-service/llm/context/utils/content-compaction.js.map +1 -1
- package/dist/ai-service/llm/context/utils/index.d.ts +1 -1
- package/dist/ai-service/llm/context/utils/index.d.ts.map +1 -1
- package/dist/ai-service/llm/context/utils/index.js +1 -1
- package/dist/ai-service/llm/context/utils/index.js.map +1 -1
- package/dist/ai-service/llm/context/utils/message-utils.d.ts +17 -7
- package/dist/ai-service/llm/context/utils/message-utils.d.ts.map +1 -1
- package/dist/ai-service/llm/context/utils/message-utils.js +31 -18
- package/dist/ai-service/llm/context/utils/message-utils.js.map +1 -1
- package/dist/ai-service/llmobs/helpers.d.ts +9 -2
- package/dist/ai-service/llmobs/helpers.d.ts.map +1 -1
- package/dist/ai-service/llmobs/helpers.js +17 -4
- package/dist/ai-service/llmobs/helpers.js.map +1 -1
- package/dist/ai-service/llmobs/middleware/retry.d.ts +51 -0
- package/dist/ai-service/llmobs/middleware/retry.d.ts.map +1 -0
- package/dist/ai-service/llmobs/middleware/retry.js +147 -0
- package/dist/ai-service/llmobs/middleware/retry.js.map +1 -0
- package/dist/ai-service/llmobs/middleware/stream-text.d.ts.map +1 -1
- package/dist/ai-service/llmobs/middleware/stream-text.js +1 -0
- package/dist/ai-service/llmobs/middleware/stream-text.js.map +1 -1
- package/dist/ai-service/llmobs/tracer.d.ts +4 -0
- package/dist/ai-service/llmobs/tracer.d.ts.map +1 -1
- package/dist/ai-service/llmobs/tracer.js +11 -0
- package/dist/ai-service/llmobs/tracer.js.map +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/library-components/ButtonPropsDocs.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/library-components/CheckboxPropsDocs.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/library-components/ColumnPropsDocs.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/library-components/ContainerPropsDocs.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/library-components/DatePickerPropsDocs.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/library-components/DropdownPropsDocs.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/library-components/IconPropsDocs.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/library-components/ImagePropsDocs.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/library-components/InputPropsDocs.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/library-components/ModalPropsDocs.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/library-components/PagePropsDocs.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/library-components/SectionPropsDocs.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/library-components/SlideoutPropsDocs.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/library-components/SwitchPropsDocs.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/library-components/TablePropsDocs.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/library-components/TextPropsDocs.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/library-typedefs/Dim.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/library-typedefs/EventFlow.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/library-typedefs/TextStyleWithVariant.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/full-examples.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-api.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-components-rules.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-custom-components.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-data-filtering.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-event-flow.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-forms.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-layouts.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-page.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-rbac.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-routes.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-state.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-theming-chakra-new.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/system-base.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/system-incremental.js +1 -1
- package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/system-specific-edit.js +1 -1
- package/dist/ai-service/state-machine/clark-fsm.d.ts +2 -0
- package/dist/ai-service/state-machine/clark-fsm.d.ts.map +1 -1
- package/dist/ai-service/state-machine/clark-fsm.js.map +1 -1
- package/dist/ai-service/state-machine/handlers/llm-generating.d.ts.map +1 -1
- package/dist/ai-service/state-machine/handlers/llm-generating.js +363 -336
- package/dist/ai-service/state-machine/handlers/llm-generating.js.map +1 -1
- package/dist/ai-service/util/retry-on-timeout.d.ts +93 -0
- package/dist/ai-service/util/retry-on-timeout.d.ts.map +1 -0
- package/dist/ai-service/util/retry-on-timeout.js +153 -0
- package/dist/ai-service/util/retry-on-timeout.js.map +1 -0
- package/dist/ai-service/util/stop-condition.d.ts +4 -1
- package/dist/ai-service/util/stop-condition.d.ts.map +1 -1
- package/dist/ai-service/util/stop-condition.js +14 -2
- package/dist/ai-service/util/stop-condition.js.map +1 -1
- package/dist/sync-service/download.d.ts.map +1 -1
- package/dist/sync-service/download.js +28 -7
- package/dist/sync-service/download.js.map +1 -1
- package/dist/util/logger.d.ts +13 -0
- package/dist/util/logger.d.ts.map +1 -1
- package/dist/util/logger.js +21 -0
- package/dist/util/logger.js.map +1 -1
- package/package.json +11 -9
- package/dist/ai-service/llm/context/logger.d.ts +0 -17
- package/dist/ai-service/llm/context/logger.d.ts.map +0 -1
- package/dist/ai-service/llm/context/logger.js +0 -26
- package/dist/ai-service/llm/context/logger.js.map +0 -1
package/dist/ai-service/state-machine/handlers/llm-generating.js:

@@ -1,5 +1,5 @@
 import { AiMode, } from "@superblocksteam/library-shared/types";
-import {
+import { smoothStream, } from "ai";
 import { getErrorMeta, getLogger } from "../../../util/logger.js";
 import { buildBaseSystemPrompt } from "../../agent/prompts/build-base-system-prompt.js";
 import { getToolCallArguments } from "../../agent/tool-message-utils.js";
@@ -46,11 +46,21 @@ function formatSummaryForAgents(latestSummary) {
 const buildUserMessage = (userPrompt, promptContext, mode, userApprovedPlan) => {
 const content = [];
 const focusedEntities = promptContext?.entities;
+const selectedIntegrations = promptContext?.integrations;
+// Add focused entities if present
 if (focusedEntities?.length) {
 content.push({
 type: "text",
-text: `<focused_entities>\nThe user has focused the editor on the following entities corresponding to the current app state. Attempt to constrain your actions to affect only these entities.\n${safeJsonStringify(focusedEntities)}\n</focused_entities>\n\n
-
+text: `<focused_entities>\nThe user has focused the editor on the following entities corresponding to the current app state. Attempt to constrain your actions to affect only these entities.\n${safeJsonStringify(focusedEntities)}\n</focused_entities>\n\n`,
+});
+}
+// Add selected integrations if present
+if (selectedIntegrations?.length) {
+// Omit metadata to reduce token usage - only include essential identifying information
+const integrationsWithoutMetadata = selectedIntegrations.map(({ id, name, type }) => ({ id, name, type }));
+content.push({
+type: "text",
+text: `<selected_integrations>\nThe user has explicitly selected the following integration configurations to use. You MUST use these specific integrations when creating or modifying APIs.\n${safeJsonStringify(integrationsWithoutMetadata)}\n</selected_integrations>\n\n`,
 });
 }
 if (promptContext?.attachments?.length) {
@@ -95,244 +105,285 @@ export const doLLMGenerating = (clark, services) => {
 const { abortController } = clark.context;
 const contextId = getContextId(clark, services);
 const contextOptions = clark.context.llmConfig?.contextOptions;
-
-const
-
-await
-
-
-
-
-
-});
-}
-const systemPrompt = buildBaseSystemPrompt(mode);
-const system = {
-role: "system",
-content: systemPrompt,
-};
-context.setSystemPrompt(system);
-const prompt = buildUserMessage(userPrompt, promptContext, mode, userApprovedPlan);
-context.startTurn(prompt);
-const messages = context.getMessages();
-const model = services.llmProvider.modelForClassification("broad_edit");
-// Process LLM configuration up front so we can log it once at the top
-const llmConfig = clark.context.llmConfig;
-const disabledTools = clark.context.llmConfig?.disabledTools;
-const { headers, thinkingEnabled, thinkingBudgetTokens, interleavedThinking, providerOptions, } = processLLMConfig(llmConfig, 5000, // default budget tokens
-`LLM Generating (model=${model.modelId}, disabledTools=${disabledTools?.length ? disabledTools.join(",") : "none"})`);
-const conversationId = Date.now();
-const startTimestamp = new Date().toISOString();
-const runTimestamp = clark.context.runTimestamp || startTimestamp;
-// Initialize log content
-const logRef = {
-content: `=== LLM CONVERSATION START [${conversationId}] ===\n`,
-};
-logRef.content += `Timestamp: ${startTimestamp}\n`;
-logRef.content += `Model: ${model.modelId}\n`;
-// Log AI provider config
-logRef.content += `Provider Config: provider=${llmConfig?.provider || "anthropic (default)"}, thinking=${thinkingEnabled}, budget=${thinkingBudgetTokens}, interleaved=${interleavedThinking}`;
-if (disabledTools?.length) {
-logRef.content += `, disabledTools=${disabledTools.join(",")}`;
-}
-if (headers && Object.keys(headers).length > 0) {
-logRef.content += `, headers=${JSON.stringify(headers)}`;
-}
-logRef.content += `--- INPUT MESSAGES ---\n`;
-logRef.content += `[SYSTEM] ${systemPrompt}\n\n`;
-logRef.content += `[${system.role.toUpperCase()}] ${system.content}\n\n`;
-// Note: Input token count will be logged once LLM call begins
-// buildTools now returns filtered tools based on mode via registry
-const actualTools = await buildTools(clark, services, promptContext, logRef, disabledTools, mode);
-logRef.content += `Mode: ${mode}, Available tools: ${Object.keys(actualTools).length}\n`;
-logRef.content += `Tools: ${Object.keys(actualTools).join(", ")}\n\n`;
-let stepCount = 0;
-// Track cumulative token usage across all steps
-let totalInputTokens = 0;
-let totalOutputTokens = 0;
-let totalCachedTokens = 0;
-services.clarkProfiler.startLLMWaiting({
-messages: messages,
-model: model.modelId,
-});
-let firstTokenReceived = false;
-let thinkingSpanActive = false;
-let textSpanActive = false;
-const allReasoningDeltas = [];
-let currentStepReasoningDeltas = [];
-let currentStepTextDeltas = [];
-getLogger().info(`CURRENT_MODE: ${mode}`);
-const build = tracedStreamText({
-abortSignal: abortController?.signal,
-model,
-providerOptions,
-headers,
-experimental_transform: [smoothStream({ chunking: "line" })],
-messages,
-tools: actualTools,
-prepareStep: async (step) => {
-context.startStep();
-const messages = context.getMessages();
-return { ...step, messages };
+// Generate a unique owner ID for this LLM generation session
+const ownerId = `llm-generating-${Date.now()}-${Math.random().toString(36).slice(2, 9)}`;
+// Acquire exclusive context access
+const contextHandle = await services.contextManager.acquireContext(contextId, ownerId, {
+contextOptions,
+acquireOptions: {
+waitTimeoutMs: 30000, // Wait up to 30 seconds for context
+lockTimeoutMs: 600000, // Hold lock for up to 10 minutes
+retryIntervalMs: 100,
 },
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+});
+try {
+const context = contextHandle.context;
+const mode = clark.context.currentMode;
+const userApprovedPlan = clark.context.userApprovedPlan;
+await chatSessionStore.getMessages();
+const latestSummary = await chatSessionStore.getLatestSummary();
+if (latestSummary) {
+context.seedIfEmpty({
+role: "user",
+content: `Reference the context from the previous exchange with the user, if relevant:\n\n${formatSummaryForAgents(latestSummary)}`,
+});
+}
+const systemPrompt = buildBaseSystemPrompt(mode);
+const system = {
+role: "system",
+content: systemPrompt,
+};
+context.setSystemPrompt(system);
+const prompt = buildUserMessage(userPrompt, promptContext, mode, userApprovedPlan);
+context.startTurn(prompt);
+const messages = context.getMessages();
+const model = services.llmProvider.modelForClassification("broad_edit");
+// Process LLM configuration up front so we can log it once at the top
+const llmConfig = clark.context.llmConfig;
+const disabledTools = clark.context.llmConfig?.disabledTools;
+const { headers, thinkingEnabled, thinkingBudgetTokens, interleavedThinking, providerOptions, } = processLLMConfig(llmConfig, 5000, // default budget tokens
+`LLM Generating (model=${model.modelId}, disabledTools=${disabledTools?.length ? disabledTools.join(",") : "none"})`);
+const conversationId = Date.now();
+const startTimestamp = new Date().toISOString();
+const runTimestamp = clark.context.runTimestamp || startTimestamp;
+// Initialize log content
+const logRef = {
+content: `=== LLM CONVERSATION START [${conversationId}] ===\n`,
+};
+logRef.content += `Timestamp: ${startTimestamp}\n`;
+logRef.content += `Model: ${model.modelId}\n`;
+// Log AI provider config
+logRef.content += `Provider Config: provider=${llmConfig?.provider || "anthropic (default)"}, thinking=${thinkingEnabled}, budget=${thinkingBudgetTokens}, interleaved=${interleavedThinking}`;
+if (disabledTools?.length) {
+logRef.content += `, disabledTools=${disabledTools.join(",")}`;
+}
+if (headers && Object.keys(headers).length > 0) {
+logRef.content += `, headers=${JSON.stringify(headers)}`;
+}
+logRef.content += `--- INPUT MESSAGES ---\n`;
+logRef.content += `[SYSTEM] ${systemPrompt}\n\n`;
+logRef.content += `[${system.role.toUpperCase()}] ${system.content}\n\n`;
+// Note: Input token count will be logged once LLM call begins
+// buildTools now returns filtered tools based on mode via registry
+const actualTools = await buildTools(clark, services, promptContext, logRef, disabledTools, mode);
+logRef.content += `Mode: ${mode}, Available tools: ${Object.keys(actualTools).length}\n`;
+logRef.content += `Tools: ${Object.keys(actualTools).join(", ")}\n\n`;
+let stepCount = 0;
+// Track cumulative token usage across all steps
+let totalInputTokens = 0;
+let totalOutputTokens = 0;
+let totalCachedTokens = 0;
+services.clarkProfiler.startLLMWaiting({
+messages: messages,
+model: model.modelId,
+});
+let firstTokenReceived = false;
+let thinkingSpanActive = false;
+let textSpanActive = false;
+const allReasoningDeltas = [];
+let currentStepReasoningDeltas = [];
+let currentStepTextDeltas = [];
+getLogger().info(`CURRENT_MODE: ${mode}`);
+const build = tracedStreamText({
+abortSignal: abortController?.signal,
+model,
+providerOptions,
+headers,
+experimental_transform: [smoothStream({ chunking: "line" })],
+messages,
+tools: actualTools,
+prepareStep: async (step) => {
+context.startStep();
+const messages = context.getMessages();
+return { ...step, messages };
+},
+stopWhen: [
+hasToolSuccess("build_finalize"),
+hasToolSuccess("exitPlanMode"),
+hasToolSuccess("askMultiChoice"),
+],
+onChunk: (chunkData) => {
+if (!firstTokenReceived) {
+firstTokenReceived = true;
+services.clarkProfiler.startLLMStreaming({
+firstChunk: chunkData.chunk.type === "text-delta"
+? chunkData.chunk.text
+: `[${chunkData.chunk.type}]`,
 });
 }
-
-
-const textTrack = "text_generation";
-services.clarkProfiler
-.getProfiler()
-.createTrack(textTrack, "Text Generation", "llm");
-currentStepTextDeltas.push(chunkData.chunk.text);
-if (!textSpanActive) {
-textSpanActive = true;
+if (chunkData.chunk.type === "reasoning-delta") {
+const thinkingTrack = "thinking";
 services.clarkProfiler
 .getProfiler()
-.
-
-
-
-
-
-
-
-
-
-
-
-logRef.content += `--- OUTPUT STEP ${stepCount} [${stepTimestamp}] ---\n`;
-if (step.reasoning && thinkingSpanActive) {
-const thinkingTrack = "thinking";
-services.clarkProfiler
-.getProfiler()
-.updateActiveFrameArgs(thinkingTrack, {
-completeReasoningText: currentStepReasoningDeltas.join(" "),
-reasoningLength: step.reasoning.length,
-stepComplete: true,
-});
-services.clarkProfiler.getProfiler().endFrame(thinkingTrack);
-thinkingSpanActive = false;
-currentStepReasoningDeltas = [];
-}
-// Log token usage for this step and accumulate totals
-if (step.usage) {
-const stepInputTokens = step.usage.inputTokens ?? 0;
-const stepOutputTokens = step.usage.outputTokens ?? 0;
-const stepCachedTokens = step.usage.cachedInputTokens ?? 0;
-// Accumulate totals
-totalInputTokens += stepInputTokens;
-totalOutputTokens += stepOutputTokens;
-totalCachedTokens += stepCachedTokens;
-logRef.content += `[TOKEN USAGE] Input: ${stepInputTokens}, Output: ${stepOutputTokens}, Total: ${step.usage.totalTokens ?? 0}`;
-if (stepCachedTokens) {
-logRef.content += `, Cached: ${stepCachedTokens}`;
+.createTrack(thinkingTrack, "AI Thinking/Reasoning", "llm");
+allReasoningDeltas.push(chunkData.chunk.text);
+currentStepReasoningDeltas.push(chunkData.chunk.text);
+if (!thinkingSpanActive) {
+thinkingSpanActive = true;
+services.clarkProfiler
+.getProfiler()
+.startFrame(`Thinking Step ${stepCount + 1}`, thinkingTrack, {
+stepNumber: stepCount + 1,
+chunkType: chunkData.chunk.type,
+});
+}
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-logRef.content += `[TOOLS CALLED]\n`;
-toolsCalled.forEach((tool, idx) => {
-logRef.content += ` Tool ${idx + 1}: ${tool.toolName}\n`;
-logRef.content += ` Input: ${tool.input}\n`;
-logRef.content += ` Output: ${tool.output}\n`;
-});
-try {
-await Promise.all(step.toolCalls.map(async (toolCall) => {
-const args = await getToolCallArguments(toolCall.toolName, toolCall.input, clark);
-await services.chatSessionStore.recordAssistant({
-type: "tool",
-tool: toolCall.toolName,
-args: args,
-...(apiGroup && { group: apiGroup }),
+if (chunkData.chunk.type === "text-delta") {
+const textTrack = "text_generation";
+services.clarkProfiler
+.getProfiler()
+.createTrack(textTrack, "Text Generation", "llm");
+currentStepTextDeltas.push(chunkData.chunk.text);
+if (!textSpanActive) {
+textSpanActive = true;
+services.clarkProfiler
+.getProfiler()
+.startFrame(`Text Generation Step ${stepCount + 1}`, textTrack, {
+stepNumber: stepCount + 1,
+firstTextDelta: chunkData.chunk.text.slice(0, 50) +
+(chunkData.chunk.text.length > 50 ? "..." : ""),
 });
-}
+}
+}
 }
-
-
-}
-//
-
-
-
-
-
-
+},
+onStepFinish: async (step) => {
+stepCount++;
+const stepTimestamp = new Date().toISOString();
+context.endStep(step.response.messages, step.usage);
+logRef.content += `--- OUTPUT STEP ${stepCount} [${stepTimestamp}] ---\n`;
+if (step.reasoning && thinkingSpanActive) {
+const thinkingTrack = "thinking";
+services.clarkProfiler
+.getProfiler()
+.updateActiveFrameArgs(thinkingTrack, {
+completeReasoningText: currentStepReasoningDeltas.join(" "),
+reasoningLength: step.reasoning.length,
+stepComplete: true,
+});
+services.clarkProfiler.getProfiler().endFrame(thinkingTrack);
+thinkingSpanActive = false;
+currentStepReasoningDeltas = [];
+}
+-//
+-
+-
+-
+-
+-
+-
+-
+// Log token usage for this step and accumulate totals
+if (step.usage) {
+const stepInputTokens = step.usage.inputTokens ?? 0;
+const stepOutputTokens = step.usage.outputTokens ?? 0;
+const stepCachedTokens = step.usage.cachedInputTokens ?? 0;
+// Accumulate totals
+totalInputTokens += stepInputTokens;
+totalOutputTokens += stepOutputTokens;
+totalCachedTokens += stepCachedTokens;
+logRef.content += `[TOKEN USAGE] Input: ${stepInputTokens}, Output: ${stepOutputTokens}, Total: ${step.usage.totalTokens ?? 0}`;
+if (stepCachedTokens) {
+logRef.content += `, Cached: ${stepCachedTokens}`;
 }
+logRef.content += `\n`;
+}
+const apiToolCall = step.toolCalls.find((toolCall) => toolCall.toolName === "build_generateApiSource");
+const apiName = apiToolCall?.input?.apiName;
+const apiGroup = apiName ? `api-${apiName}` : undefined;
+// Record tool calls FIRST before other messages to ensure proper ordering
+const toolsCalled = step.content
+.filter((c) => c.type === "tool-result")
+.map((c) => ({
+toolName: c.toolName,
+input: JSON.stringify(c.input),
+output: JSON.stringify(c.output, null, 2),
+}));
+if (toolsCalled.length > 0) {
+logRef.content += `[TOOLS CALLED]\n`;
+toolsCalled.forEach((tool, idx) => {
+logRef.content += ` Tool ${idx + 1}: ${tool.toolName}\n`;
+logRef.content += ` Input: ${tool.input}\n`;
+logRef.content += ` Output: ${tool.output}\n`;
+});
 try {
-
+await Promise.all(step.toolCalls.map(async (toolCall) => {
+const args = await getToolCallArguments(toolCall.toolName, toolCall.input, clark);
+await services.chatSessionStore.recordAssistant({
+type: "tool",
+tool: toolCall.toolName,
+args: args,
+...(apiGroup && { group: apiGroup }),
+});
+}));
 }
-catch {
-
+catch (error) {
+getLogger().error("Failed to record tool calls", getErrorMeta(error));
 }
+// Record profiling events
+toolsCalled.forEach((tool, idx) => {
+let parsedInput, parsedOutput;
+try {
+parsedInput = JSON.parse(tool.input);
+}
+catch {
+parsedInput = tool.input;
+}
+try {
+parsedOutput = JSON.parse(tool.output);
+}
+catch {
+parsedOutput = tool.output;
+}
+services.clarkProfiler
+.getProfiler()
+.addInstantEvent(`Tool Call: ${tool.toolName}`, "llm", {
+step: stepCount,
+toolIndex: idx + 1,
+toolName: tool.toolName,
+input: parsedInput,
+output: parsedOutput,
+inputSize: tool.input.length,
+outputSize: tool.output.length,
+});
+});
+}
+// Record reasoning messages AFTER tool calls
+if (step.reasoning) {
+const reasoningLines = [
+"[REASONING]",
+...step.reasoning.map(({ text }) => text),
+"",
+];
+logRef.content += reasoningLines.join("\n");
+const reasoningText = step.reasoning
+.map(({ text }) => text)
+.join("");
+if (reasoningText) {
+try {
+await services.chatSessionStore.recordAssistant({
+type: "reasoning",
+text: reasoningText,
+...(apiGroup && { group: apiGroup }),
+});
+}
+catch (error) {
+getLogger().error("Failed to record message from LLM", getErrorMeta(error));
+}
+}
+}
+if (step.text && textSpanActive) {
+const textTrack = "text_generation";
 services.clarkProfiler
 .getProfiler()
 .
-
-
-
-
+-
+-
+-
+-
 output: parsedOutput,
 inputSize: tool.input.length,
 outputSize: tool.output.length,
+.updateActiveFrameArgs(textTrack, {
+completeTextContent: currentStepTextDeltas.join(""),
+finalText: step.text,
+textLength: step.text.length,
+stepComplete: true,
 });
-
-
-
-
-
-
-
+-
+-
+-
+-
+-
+-
+-
 "",
 ];
 logRef.content += reasoningLines.join("\n");
@@ -754,18 +764,27 @@
 .map(({ text }) => text)
 .join("");
 if (reasoningText) {
+services.clarkProfiler.getProfiler().endFrame(textTrack);
+textSpanActive = false;
+currentStepTextDeltas = [];
+}
+// Record text messages AFTER tool calls and reasoning
+if (step.text) {
+logRef.content += `[ASSISTANT TEXT] ${step.text}\n`;
 try {
 await services.chatSessionStore.recordAssistant({
-type: "
-text:
+type: "text",
+text: step.text,
 ...(apiGroup && { group: apiGroup }),
 });
 }
@@ -340,126 +391,102 @@ export const doLLMGenerating = (clark, services) => {
 getLogger().error("Failed to record message from LLM", getErrorMeta(error));
 }
 }
-
-
-
-
-
-
-
-
-
-
+logRef.content += `\n`;
+},
+onFinish: (result) => {
+context.endTurn(result.totalUsage);
+},
+}, clark.tracer, clark.logger);
+for await (const chunk of build.fullStream) {
+await processStreamChunk(chunk, clark, logRef);
+}
+if (firstTokenReceived) {
+services.clarkProfiler.endFrame();
+}
+services.clarkProfiler.endFrame();
+if (thinkingSpanActive) {
+services.clarkProfiler.getProfiler().endFrame("thinking");
+}
+if (textSpanActive) {
+services.clarkProfiler.getProfiler().endFrame("text_generation");
+}
+const endTimestamp = new Date().toISOString();
+logRef.content += `=== LLM CONVERSATION END [${conversationId}] ===\n`;
+logRef.content += `End Timestamp: ${endTimestamp}\n`;
+logRef.content += `Total Steps: ${stepCount}\n`;
+// Log final token usage summary using accumulated totals
+const finalTotalTokens = totalInputTokens + totalOutputTokens;
+logRef.content += `[TOTAL TOKEN USAGE] Input: ${totalInputTokens}, Output: ${totalOutputTokens}, Total: ${finalTotalTokens}`;
+if (totalCachedTokens > 0) {
+logRef.content += `, Cached: ${totalCachedTokens}`;
+}
+logRef.content += `\n`;
+try {
+// Create TokenRequestData object from accumulated step data (not AI SDK usage which is only final step)
+const requestTokenData = {
+requestId: conversationId.toString(),
+inputTokens: totalInputTokens,
+outputTokens: totalOutputTokens,
+totalTokens: totalInputTokens + totalOutputTokens,
+cachedInputTokens: totalCachedTokens,
+model: model.modelId,
+startTime: startTimestamp,
+endTime: endTimestamp,
+};
+await clark.context.peer?.call.aiPushTokenUsage(requestTokenData);
+}
+catch (error) {
+// Token tracking is non-critical - log error but don't fail the AI request
+getLogger().warn("Failed to send token usage data", error instanceof Error ? error.message : String(error));
+}
+// Save the complete log using saveGeneratedArtifact
+try {
+const logArtifact = {
+type: "file",
+filePath: `llm-conversation-${conversationId}.log`,
+content: logRef.content,
+};
+const stepId = `llm-conversation-${conversationId}`;
+await services.appShell.saveGeneratedArtifact(logArtifact, stepId, runTimestamp);
+getLogger().debug("LLM conversation log saved");
+}
+catch (error) {
+getLogger().error("Failed to save LLM conversation log", {
+error: {
+kind: "SaveLogError",
+message: error instanceof Error ? error.message : String(error),
+stack: error instanceof Error ? error.stack : undefined,
+},
+});
+}
+// Check if there are local draft changes
+const hasLocalDraft = await services.draftInterface.hasLocalDraftChanges();
+// In PLAN mode, only proceed to draft state if:
+// 1. User has approved the plan (userApprovedPlan === true)
+// 2. There are actual file changes (hasLocalDraft === true)
+if (mode === AiMode.PLAN) {
+if (!userApprovedPlan || !hasLocalDraft) {
+void transitionTo({
+type: APP_RUNTIME_UPDATED_WITHOUT_EDITS,
 });
-
-textSpanActive = false;
-currentStepTextDeltas = [];
-}
-// Record text messages AFTER tool calls and reasoning
-if (step.text) {
-logRef.content += `[ASSISTANT TEXT] ${step.text}\n`;
-try {
-await services.chatSessionStore.recordAssistant({
-type: "text",
-text: step.text,
-...(apiGroup && { group: apiGroup }),
-});
-}
-catch (error) {
-getLogger().error("Failed to record message from LLM", getErrorMeta(error));
-}
+return;
 }
-
-}
-
-
-
-
-
-
-}
-if (firstTokenReceived) {
-services.clarkProfiler.endFrame();
-}
-services.clarkProfiler.endFrame();
-if (thinkingSpanActive) {
-services.clarkProfiler.getProfiler().endFrame("thinking");
-}
-if (textSpanActive) {
-services.clarkProfiler.getProfiler().endFrame("text_generation");
-}
-const endTimestamp = new Date().toISOString();
-logRef.content += `=== LLM CONVERSATION END [${conversationId}] ===\n`;
-logRef.content += `End Timestamp: ${endTimestamp}\n`;
-logRef.content += `Total Steps: ${stepCount}\n`;
-// Log final token usage summary using accumulated totals
-const finalTotalTokens = totalInputTokens + totalOutputTokens;
-logRef.content += `[TOTAL TOKEN USAGE] Input: ${totalInputTokens}, Output: ${totalOutputTokens}, Total: ${finalTotalTokens}`;
-if (totalCachedTokens > 0) {
-logRef.content += `, Cached: ${totalCachedTokens}`;
-}
-logRef.content += `\n`;
-try {
-// Create TokenRequestData object from accumulated step data (not AI SDK usage which is only final step)
-const requestTokenData = {
-requestId: conversationId.toString(),
-inputTokens: totalInputTokens,
-outputTokens: totalOutputTokens,
-totalTokens: totalInputTokens + totalOutputTokens,
-cachedInputTokens: totalCachedTokens,
-model: model.modelId,
-startTime: startTimestamp,
-endTime: endTimestamp,
-};
-await clark.context.peer?.call.aiPushTokenUsage(requestTokenData);
-}
-catch (error) {
-// Token tracking is non-critical - log error but don't fail the AI request
-getLogger().warn("Failed to send token usage data", error instanceof Error ? error.message : String(error));
-}
-// Save the complete log using saveGeneratedArtifact
-try {
-const logArtifact = {
-type: "file",
-filePath: `llm-conversation-${conversationId}.log`,
-content: logRef.content,
-};
-const stepId = `llm-conversation-${conversationId}`;
-await services.appShell.saveGeneratedArtifact(logArtifact, stepId, runTimestamp);
-getLogger().debug("LLM conversation log saved");
-}
-catch (error) {
-getLogger().error("Failed to save LLM conversation log", {
-error: {
-kind: "SaveLogError",
-message: error instanceof Error ? error.message : String(error),
-stack: error instanceof Error ? error.stack : undefined,
-},
-});
-}
-// Check if there are local draft changes
-const hasLocalDraft = await services.draftInterface.hasLocalDraftChanges();
-// In PLAN mode, only proceed to draft state if:
-// 1. User has approved the plan (userApprovedPlan === true)
-// 2. There are actual file changes (hasLocalDraft === true)
-if (mode === AiMode.PLAN) {
-if (!userApprovedPlan || !hasLocalDraft) {
+// If plan is approved and there are changes, fall through to create draft
+}
+if (hasLocalDraft) {
+void transitionTo({
+type: V3_AGENT_FINISHED,
+});
+}
+else {
 void transitionTo({
 type: APP_RUNTIME_UPDATED_WITHOUT_EDITS,
 });
-return;
 }
-// If plan is approved and there are changes, fall through to create draft
 }
-
-
-
-});
-}
-else {
-void transitionTo({
-type: APP_RUNTIME_UPDATED_WITHOUT_EDITS,
-});
+finally {
+// Always release the context lock when done
+contextHandle.release();
 }
 }
 }