@superblocksteam/vite-plugin-file-sync 2.0.43-next.12 → 2.0.43-next.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information in this diff is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (106)
  1. package/dist/ai-service/agent/subagents/apis/generate-api-source.d.ts.map +1 -1
  2. package/dist/ai-service/agent/subagents/apis/generate-api-source.js +269 -249
  3. package/dist/ai-service/agent/subagents/apis/generate-api-source.js.map +1 -1
  4. package/dist/ai-service/agent/tools/build-finalize.d.ts +1 -22
  5. package/dist/ai-service/agent/tools/build-finalize.d.ts.map +1 -1
  6. package/dist/ai-service/agent/tools/build-finalize.js +27 -18
  7. package/dist/ai-service/agent/tools/build-finalize.js.map +1 -1
  8. package/dist/ai-service/agent/tools2/types.d.ts +12 -0
  9. package/dist/ai-service/agent/tools2/types.d.ts.map +1 -1
  10. package/dist/ai-service/agent/tools2/types.js +20 -0
  11. package/dist/ai-service/agent/tools2/types.js.map +1 -1
  12. package/dist/ai-service/llm/context/constants.d.ts +7 -6
  13. package/dist/ai-service/llm/context/constants.d.ts.map +1 -1
  14. package/dist/ai-service/llm/context/constants.js +7 -6
  15. package/dist/ai-service/llm/context/constants.js.map +1 -1
  16. package/dist/ai-service/llm/context/context-handle.d.ts +106 -0
  17. package/dist/ai-service/llm/context/context-handle.d.ts.map +1 -0
  18. package/dist/ai-service/llm/context/context-handle.js +134 -0
  19. package/dist/ai-service/llm/context/context-handle.js.map +1 -0
  20. package/dist/ai-service/llm/context/context-lock.d.ts +144 -0
  21. package/dist/ai-service/llm/context/context-lock.d.ts.map +1 -0
  22. package/dist/ai-service/llm/context/context-lock.js +221 -0
  23. package/dist/ai-service/llm/context/context-lock.js.map +1 -0
  24. package/dist/ai-service/llm/context/context.d.ts +18 -19
  25. package/dist/ai-service/llm/context/context.d.ts.map +1 -1
  26. package/dist/ai-service/llm/context/context.js +76 -127
  27. package/dist/ai-service/llm/context/context.js.map +1 -1
  28. package/dist/ai-service/llm/context/index.d.ts +4 -0
  29. package/dist/ai-service/llm/context/index.d.ts.map +1 -1
  30. package/dist/ai-service/llm/context/index.js +5 -0
  31. package/dist/ai-service/llm/context/index.js.map +1 -1
  32. package/dist/ai-service/llm/context/internal-types.d.ts +0 -2
  33. package/dist/ai-service/llm/context/internal-types.d.ts.map +1 -1
  34. package/dist/ai-service/llm/context/internal-types.js.map +1 -1
  35. package/dist/ai-service/llm/context/levels/l1.d.ts.map +1 -1
  36. package/dist/ai-service/llm/context/levels/l1.js +3 -5
  37. package/dist/ai-service/llm/context/levels/l1.js.map +1 -1
  38. package/dist/ai-service/llm/context/manager.d.ts +60 -11
  39. package/dist/ai-service/llm/context/manager.d.ts.map +1 -1
  40. package/dist/ai-service/llm/context/manager.js +111 -35
  41. package/dist/ai-service/llm/context/manager.js.map +1 -1
  42. package/dist/ai-service/llm/context/utils/content-compaction.d.ts +2 -2
  43. package/dist/ai-service/llm/context/utils/content-compaction.d.ts.map +1 -1
  44. package/dist/ai-service/llm/context/utils/content-compaction.js +6 -3
  45. package/dist/ai-service/llm/context/utils/content-compaction.js.map +1 -1
  46. package/dist/ai-service/llm/context/utils/index.d.ts +1 -1
  47. package/dist/ai-service/llm/context/utils/index.d.ts.map +1 -1
  48. package/dist/ai-service/llm/context/utils/index.js +1 -1
  49. package/dist/ai-service/llm/context/utils/index.js.map +1 -1
  50. package/dist/ai-service/llm/context/utils/message-utils.d.ts +17 -7
  51. package/dist/ai-service/llm/context/utils/message-utils.d.ts.map +1 -1
  52. package/dist/ai-service/llm/context/utils/message-utils.js +31 -18
  53. package/dist/ai-service/llm/context/utils/message-utils.js.map +1 -1
  54. package/dist/ai-service/llmobs/middleware/stream-text.d.ts.map +1 -1
  55. package/dist/ai-service/llmobs/middleware/stream-text.js +1 -0
  56. package/dist/ai-service/llmobs/middleware/stream-text.js.map +1 -1
  57. package/dist/ai-service/llmobs/tracer.d.ts +4 -0
  58. package/dist/ai-service/llmobs/tracer.d.ts.map +1 -1
  59. package/dist/ai-service/llmobs/tracer.js +11 -0
  60. package/dist/ai-service/llmobs/tracer.js.map +1 -1
  61. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/ButtonPropsDocs.js +1 -1
  62. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/CheckboxPropsDocs.js +1 -1
  63. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/ColumnPropsDocs.js +1 -1
  64. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/ContainerPropsDocs.js +1 -1
  65. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/DatePickerPropsDocs.js +1 -1
  66. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/DropdownPropsDocs.js +1 -1
  67. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/IconPropsDocs.js +1 -1
  68. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/ImagePropsDocs.js +1 -1
  69. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/InputPropsDocs.js +1 -1
  70. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/ModalPropsDocs.js +1 -1
  71. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/PagePropsDocs.js +1 -1
  72. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/SectionPropsDocs.js +1 -1
  73. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/SlideoutPropsDocs.js +1 -1
  74. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/SwitchPropsDocs.js +1 -1
  75. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/TablePropsDocs.js +1 -1
  76. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/TextPropsDocs.js +1 -1
  77. package/dist/ai-service/prompt-builder-service/static-fragments/library-typedefs/Dim.js +1 -1
  78. package/dist/ai-service/prompt-builder-service/static-fragments/library-typedefs/EventFlow.js +1 -1
  79. package/dist/ai-service/prompt-builder-service/static-fragments/library-typedefs/TextStyleWithVariant.js +1 -1
  80. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/full-examples.js +1 -1
  81. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-api.js +1 -1
  82. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-components-rules.js +1 -1
  83. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-custom-components.js +1 -1
  84. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-data-filtering.js +1 -1
  85. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-event-flow.js +1 -1
  86. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-forms.js +1 -1
  87. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-layouts.js +1 -1
  88. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-page.js +1 -1
  89. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-rbac.js +1 -1
  90. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-routes.js +1 -1
  91. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-state.js +1 -1
  92. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-theming-chakra-new.js +1 -1
  93. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/system-base.js +1 -1
  94. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/system-incremental.js +1 -1
  95. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/system-specific-edit.js +1 -1
  96. package/dist/ai-service/state-machine/handlers/llm-generating.d.ts.map +1 -1
  97. package/dist/ai-service/state-machine/handlers/llm-generating.js +351 -334
  98. package/dist/ai-service/state-machine/handlers/llm-generating.js.map +1 -1
  99. package/dist/ai-service/util/stop-condition.d.ts +4 -1
  100. package/dist/ai-service/util/stop-condition.d.ts.map +1 -1
  101. package/dist/ai-service/util/stop-condition.js +14 -2
  102. package/dist/ai-service/util/stop-condition.js.map +1 -1
  103. package/dist/sync-service/download.d.ts.map +1 -1
  104. package/dist/sync-service/download.js +28 -7
  105. package/dist/sync-service/download.js.map +1 -1
  106. package/package.json +9 -8
@@ -1,5 +1,5 @@
  import { AiMode, } from "@superblocksteam/library-shared/types";
- import { hasToolCall, smoothStream, } from "ai";
+ import { smoothStream, } from "ai";
  import { getErrorMeta, getLogger } from "../../../util/logger.js";
  import { buildBaseSystemPrompt } from "../../agent/prompts/build-base-system-prompt.js";
  import { getToolCallArguments } from "../../agent/tool-message-utils.js";
@@ -95,244 +95,285 @@ export const doLLMGenerating = (clark, services) => {
  const { abortController } = clark.context;
  const contextId = getContextId(clark, services);
  const contextOptions = clark.context.llmConfig?.contextOptions;
- const context = await services.contextManager.getContext(contextId, contextOptions);
- const mode = clark.context.currentMode;
- const userApprovedPlan = clark.context.userApprovedPlan;
- await chatSessionStore.getMessages();
- const latestSummary = await chatSessionStore.getLatestSummary();
- if (latestSummary) {
- context.seedIfEmpty({
- role: "user",
- content: `Reference the context from the previous exchange with the user, if relevant:\n\n${formatSummaryForAgents(latestSummary)}`,
- });
- }
- const systemPrompt = buildBaseSystemPrompt(mode);
- const system = {
- role: "system",
- content: systemPrompt,
- };
- context.setSystemPrompt(system);
- const prompt = buildUserMessage(userPrompt, promptContext, mode, userApprovedPlan);
- context.startTurn(prompt);
- const messages = context.getMessages();
- const model = services.llmProvider.modelForClassification("broad_edit");
- // Process LLM configuration up front so we can log it once at the top
- const llmConfig = clark.context.llmConfig;
- const disabledTools = clark.context.llmConfig?.disabledTools;
- const { headers, thinkingEnabled, thinkingBudgetTokens, interleavedThinking, providerOptions, } = processLLMConfig(llmConfig, 5000, // default budget tokens
- `LLM Generating (model=${model.modelId}, disabledTools=${disabledTools?.length ? disabledTools.join(",") : "none"})`);
- const conversationId = Date.now();
- const startTimestamp = new Date().toISOString();
- const runTimestamp = clark.context.runTimestamp || startTimestamp;
- // Initialize log content
- const logRef = {
- content: `=== LLM CONVERSATION START [${conversationId}] ===\n`,
- };
- logRef.content += `Timestamp: ${startTimestamp}\n`;
- logRef.content += `Model: ${model.modelId}\n`;
- // Log AI provider config
- logRef.content += `Provider Config: provider=${llmConfig?.provider || "anthropic (default)"}, thinking=${thinkingEnabled}, budget=${thinkingBudgetTokens}, interleaved=${interleavedThinking}`;
- if (disabledTools?.length) {
- logRef.content += `, disabledTools=${disabledTools.join(",")}`;
- }
- if (headers && Object.keys(headers).length > 0) {
- logRef.content += `, headers=${JSON.stringify(headers)}`;
- }
- logRef.content += `--- INPUT MESSAGES ---\n`;
- logRef.content += `[SYSTEM] ${systemPrompt}\n\n`;
- logRef.content += `[${system.role.toUpperCase()}] ${system.content}\n\n`;
- // Note: Input token count will be logged once LLM call begins
- // buildTools now returns filtered tools based on mode via registry
- const actualTools = await buildTools(clark, services, promptContext, logRef, disabledTools, mode);
- logRef.content += `Mode: ${mode}, Available tools: ${Object.keys(actualTools).length}\n`;
- logRef.content += `Tools: ${Object.keys(actualTools).join(", ")}\n\n`;
- let stepCount = 0;
- // Track cumulative token usage across all steps
- let totalInputTokens = 0;
- let totalOutputTokens = 0;
- let totalCachedTokens = 0;
- services.clarkProfiler.startLLMWaiting({
- messages: messages,
- model: model.modelId,
- });
- let firstTokenReceived = false;
- let thinkingSpanActive = false;
- let textSpanActive = false;
- const allReasoningDeltas = [];
- let currentStepReasoningDeltas = [];
- let currentStepTextDeltas = [];
- getLogger().info(`CURRENT_MODE: ${mode}`);
- const build = tracedStreamText({
- abortSignal: abortController?.signal,
- model,
- providerOptions,
- headers,
- experimental_transform: [smoothStream({ chunking: "line" })],
- messages,
- tools: actualTools,
- prepareStep: async (step) => {
- context.startStep();
- const messages = context.getMessages();
- return { ...step, messages };
+ // Generate a unique owner ID for this LLM generation session
+ const ownerId = `llm-generating-${Date.now()}-${Math.random().toString(36).slice(2, 9)}`;
+ // Acquire exclusive context access
+ const contextHandle = await services.contextManager.acquireContext(contextId, ownerId, {
+ contextOptions,
+ acquireOptions: {
+ waitTimeoutMs: 30000, // Wait up to 30 seconds for context
+ lockTimeoutMs: 600000, // Hold lock for up to 10 minutes
+ retryIntervalMs: 100,
  },
- stopWhen: [
- hasToolCall("build_finalize"),
- hasToolSuccess("exitPlanMode"),
- hasToolSuccess("askMultiChoice"),
- ],
- onChunk: (chunkData) => {
- if (!firstTokenReceived) {
- firstTokenReceived = true;
- services.clarkProfiler.startLLMStreaming({
- firstChunk: chunkData.chunk.type === "text-delta"
- ? chunkData.chunk.text
- : `[${chunkData.chunk.type}]`,
- });
- }
- if (chunkData.chunk.type === "reasoning-delta") {
- const thinkingTrack = "thinking";
- services.clarkProfiler
- .getProfiler()
- .createTrack(thinkingTrack, "AI Thinking/Reasoning", "llm");
- allReasoningDeltas.push(chunkData.chunk.text);
- currentStepReasoningDeltas.push(chunkData.chunk.text);
- if (!thinkingSpanActive) {
- thinkingSpanActive = true;
- services.clarkProfiler
- .getProfiler()
- .startFrame(`Thinking Step ${stepCount + 1}`, thinkingTrack, {
- stepNumber: stepCount + 1,
- chunkType: chunkData.chunk.type,
+ });
+ try {
+ const context = contextHandle.context;
+ const mode = clark.context.currentMode;
+ const userApprovedPlan = clark.context.userApprovedPlan;
+ await chatSessionStore.getMessages();
+ const latestSummary = await chatSessionStore.getLatestSummary();
+ if (latestSummary) {
+ context.seedIfEmpty({
+ role: "user",
+ content: `Reference the context from the previous exchange with the user, if relevant:\n\n${formatSummaryForAgents(latestSummary)}`,
+ });
+ }
+ const systemPrompt = buildBaseSystemPrompt(mode);
+ const system = {
+ role: "system",
+ content: systemPrompt,
+ };
+ context.setSystemPrompt(system);
+ const prompt = buildUserMessage(userPrompt, promptContext, mode, userApprovedPlan);
+ context.startTurn(prompt);
+ const messages = context.getMessages();
+ const model = services.llmProvider.modelForClassification("broad_edit");
+ // Process LLM configuration up front so we can log it once at the top
+ const llmConfig = clark.context.llmConfig;
+ const disabledTools = clark.context.llmConfig?.disabledTools;
+ const { headers, thinkingEnabled, thinkingBudgetTokens, interleavedThinking, providerOptions, } = processLLMConfig(llmConfig, 5000, // default budget tokens
+ `LLM Generating (model=${model.modelId}, disabledTools=${disabledTools?.length ? disabledTools.join(",") : "none"})`);
+ const conversationId = Date.now();
+ const startTimestamp = new Date().toISOString();
+ const runTimestamp = clark.context.runTimestamp || startTimestamp;
+ // Initialize log content
+ const logRef = {
+ content: `=== LLM CONVERSATION START [${conversationId}] ===\n`,
+ };
+ logRef.content += `Timestamp: ${startTimestamp}\n`;
+ logRef.content += `Model: ${model.modelId}\n`;
+ // Log AI provider config
+ logRef.content += `Provider Config: provider=${llmConfig?.provider || "anthropic (default)"}, thinking=${thinkingEnabled}, budget=${thinkingBudgetTokens}, interleaved=${interleavedThinking}`;
+ if (disabledTools?.length) {
+ logRef.content += `, disabledTools=${disabledTools.join(",")}`;
+ }
+ if (headers && Object.keys(headers).length > 0) {
+ logRef.content += `, headers=${JSON.stringify(headers)}`;
+ }
+ logRef.content += `--- INPUT MESSAGES ---\n`;
+ logRef.content += `[SYSTEM] ${systemPrompt}\n\n`;
+ logRef.content += `[${system.role.toUpperCase()}] ${system.content}\n\n`;
+ // Note: Input token count will be logged once LLM call begins
+ // buildTools now returns filtered tools based on mode via registry
+ const actualTools = await buildTools(clark, services, promptContext, logRef, disabledTools, mode);
+ logRef.content += `Mode: ${mode}, Available tools: ${Object.keys(actualTools).length}\n`;
+ logRef.content += `Tools: ${Object.keys(actualTools).join(", ")}\n\n`;
+ let stepCount = 0;
+ // Track cumulative token usage across all steps
+ let totalInputTokens = 0;
+ let totalOutputTokens = 0;
+ let totalCachedTokens = 0;
+ services.clarkProfiler.startLLMWaiting({
+ messages: messages,
+ model: model.modelId,
+ });
+ let firstTokenReceived = false;
+ let thinkingSpanActive = false;
+ let textSpanActive = false;
+ const allReasoningDeltas = [];
+ let currentStepReasoningDeltas = [];
+ let currentStepTextDeltas = [];
+ getLogger().info(`CURRENT_MODE: ${mode}`);
+ const build = tracedStreamText({
+ abortSignal: abortController?.signal,
+ model,
+ providerOptions,
+ headers,
+ experimental_transform: [smoothStream({ chunking: "line" })],
+ messages,
+ tools: actualTools,
+ prepareStep: async (step) => {
+ context.startStep();
+ const messages = context.getMessages();
+ return { ...step, messages };
+ },
+ stopWhen: [
+ hasToolSuccess("build_finalize"),
+ hasToolSuccess("exitPlanMode"),
+ hasToolSuccess("askMultiChoice"),
+ ],
+ onChunk: (chunkData) => {
+ if (!firstTokenReceived) {
+ firstTokenReceived = true;
+ services.clarkProfiler.startLLMStreaming({
+ firstChunk: chunkData.chunk.type === "text-delta"
+ ? chunkData.chunk.text
+ : `[${chunkData.chunk.type}]`,
  });
  }
- }
- if (chunkData.chunk.type === "text-delta") {
- const textTrack = "text_generation";
- services.clarkProfiler
- .getProfiler()
- .createTrack(textTrack, "Text Generation", "llm");
- currentStepTextDeltas.push(chunkData.chunk.text);
- if (!textSpanActive) {
- textSpanActive = true;
+ if (chunkData.chunk.type === "reasoning-delta") {
+ const thinkingTrack = "thinking";
  services.clarkProfiler
  .getProfiler()
- .startFrame(`Text Generation Step ${stepCount + 1}`, textTrack, {
- stepNumber: stepCount + 1,
- firstTextDelta: chunkData.chunk.text.slice(0, 50) +
- (chunkData.chunk.text.length > 50 ? "..." : ""),
- });
- }
- }
- },
- onStepFinish: async (step) => {
- stepCount++;
- const stepTimestamp = new Date().toISOString();
- context.endStep(step.response.messages, step.usage);
- logRef.content += `--- OUTPUT STEP ${stepCount} [${stepTimestamp}] ---\n`;
- if (step.reasoning && thinkingSpanActive) {
- const thinkingTrack = "thinking";
- services.clarkProfiler
- .getProfiler()
- .updateActiveFrameArgs(thinkingTrack, {
- completeReasoningText: currentStepReasoningDeltas.join(" "),
- reasoningLength: step.reasoning.length,
- stepComplete: true,
- });
- services.clarkProfiler.getProfiler().endFrame(thinkingTrack);
- thinkingSpanActive = false;
- currentStepReasoningDeltas = [];
- }
- // Log token usage for this step and accumulate totals
- if (step.usage) {
- const stepInputTokens = step.usage.inputTokens ?? 0;
- const stepOutputTokens = step.usage.outputTokens ?? 0;
- const stepCachedTokens = step.usage.cachedInputTokens ?? 0;
- // Accumulate totals
- totalInputTokens += stepInputTokens;
- totalOutputTokens += stepOutputTokens;
- totalCachedTokens += stepCachedTokens;
- logRef.content += `[TOKEN USAGE] Input: ${stepInputTokens}, Output: ${stepOutputTokens}, Total: ${step.usage.totalTokens ?? 0}`;
- if (stepCachedTokens) {
- logRef.content += `, Cached: ${stepCachedTokens}`;
+ .createTrack(thinkingTrack, "AI Thinking/Reasoning", "llm");
+ allReasoningDeltas.push(chunkData.chunk.text);
+ currentStepReasoningDeltas.push(chunkData.chunk.text);
+ if (!thinkingSpanActive) {
+ thinkingSpanActive = true;
+ services.clarkProfiler
+ .getProfiler()
+ .startFrame(`Thinking Step ${stepCount + 1}`, thinkingTrack, {
+ stepNumber: stepCount + 1,
+ chunkType: chunkData.chunk.type,
+ });
+ }
  }
- logRef.content += `\n`;
- }
- const apiToolCall = step.toolCalls.find((toolCall) => toolCall.toolName === "build_generateApiSource");
- const apiName = apiToolCall?.input?.apiName;
- const apiGroup = apiName ? `api-${apiName}` : undefined;
- // Record tool calls FIRST before other messages to ensure proper ordering
- const toolsCalled = step.content
- .filter((c) => c.type === "tool-result")
- .map((c) => ({
- toolName: c.toolName,
- input: JSON.stringify(c.input),
- output: JSON.stringify(c.output, null, 2),
- }));
- if (toolsCalled.length > 0) {
- logRef.content += `[TOOLS CALLED]\n`;
- toolsCalled.forEach((tool, idx) => {
- logRef.content += ` Tool ${idx + 1}: ${tool.toolName}\n`;
- logRef.content += ` Input: ${tool.input}\n`;
- logRef.content += ` Output: ${tool.output}\n`;
- });
- try {
- await Promise.all(step.toolCalls.map(async (toolCall) => {
- const args = await getToolCallArguments(toolCall.toolName, toolCall.input, clark);
- await services.chatSessionStore.recordAssistant({
- type: "tool",
- tool: toolCall.toolName,
- args: args,
- ...(apiGroup && { group: apiGroup }),
+ if (chunkData.chunk.type === "text-delta") {
+ const textTrack = "text_generation";
+ services.clarkProfiler
+ .getProfiler()
+ .createTrack(textTrack, "Text Generation", "llm");
+ currentStepTextDeltas.push(chunkData.chunk.text);
+ if (!textSpanActive) {
+ textSpanActive = true;
+ services.clarkProfiler
+ .getProfiler()
+ .startFrame(`Text Generation Step ${stepCount + 1}`, textTrack, {
+ stepNumber: stepCount + 1,
+ firstTextDelta: chunkData.chunk.text.slice(0, 50) +
+ (chunkData.chunk.text.length > 50 ? "..." : ""),
  });
- }));
+ }
  }
- catch (error) {
- getLogger().error("Failed to record tool calls", getErrorMeta(error));
+ },
+ onStepFinish: async (step) => {
+ stepCount++;
+ const stepTimestamp = new Date().toISOString();
+ context.endStep(step.response.messages, step.usage);
+ logRef.content += `--- OUTPUT STEP ${stepCount} [${stepTimestamp}] ---\n`;
+ if (step.reasoning && thinkingSpanActive) {
+ const thinkingTrack = "thinking";
+ services.clarkProfiler
+ .getProfiler()
+ .updateActiveFrameArgs(thinkingTrack, {
+ completeReasoningText: currentStepReasoningDeltas.join(" "),
+ reasoningLength: step.reasoning.length,
+ stepComplete: true,
+ });
+ services.clarkProfiler.getProfiler().endFrame(thinkingTrack);
+ thinkingSpanActive = false;
+ currentStepReasoningDeltas = [];
  }
- // Record profiling events
- toolsCalled.forEach((tool, idx) => {
- let parsedInput, parsedOutput;
- try {
- parsedInput = JSON.parse(tool.input);
- }
- catch {
- parsedInput = tool.input;
+ // Log token usage for this step and accumulate totals
+ if (step.usage) {
+ const stepInputTokens = step.usage.inputTokens ?? 0;
+ const stepOutputTokens = step.usage.outputTokens ?? 0;
+ const stepCachedTokens = step.usage.cachedInputTokens ?? 0;
+ // Accumulate totals
+ totalInputTokens += stepInputTokens;
+ totalOutputTokens += stepOutputTokens;
+ totalCachedTokens += stepCachedTokens;
+ logRef.content += `[TOKEN USAGE] Input: ${stepInputTokens}, Output: ${stepOutputTokens}, Total: ${step.usage.totalTokens ?? 0}`;
+ if (stepCachedTokens) {
+ logRef.content += `, Cached: ${stepCachedTokens}`;
  }
+ logRef.content += `\n`;
+ }
+ const apiToolCall = step.toolCalls.find((toolCall) => toolCall.toolName === "build_generateApiSource");
+ const apiName = apiToolCall?.input?.apiName;
+ const apiGroup = apiName ? `api-${apiName}` : undefined;
+ // Record tool calls FIRST before other messages to ensure proper ordering
+ const toolsCalled = step.content
+ .filter((c) => c.type === "tool-result")
+ .map((c) => ({
+ toolName: c.toolName,
+ input: JSON.stringify(c.input),
+ output: JSON.stringify(c.output, null, 2),
+ }));
+ if (toolsCalled.length > 0) {
+ logRef.content += `[TOOLS CALLED]\n`;
+ toolsCalled.forEach((tool, idx) => {
+ logRef.content += ` Tool ${idx + 1}: ${tool.toolName}\n`;
+ logRef.content += ` Input: ${tool.input}\n`;
+ logRef.content += ` Output: ${tool.output}\n`;
+ });
  try {
- parsedOutput = JSON.parse(tool.output);
+ await Promise.all(step.toolCalls.map(async (toolCall) => {
+ const args = await getToolCallArguments(toolCall.toolName, toolCall.input, clark);
+ await services.chatSessionStore.recordAssistant({
+ type: "tool",
+ tool: toolCall.toolName,
+ args: args,
+ ...(apiGroup && { group: apiGroup }),
+ });
+ }));
  }
- catch {
- parsedOutput = tool.output;
+ catch (error) {
+ getLogger().error("Failed to record tool calls", getErrorMeta(error));
  }
+ // Record profiling events
+ toolsCalled.forEach((tool, idx) => {
+ let parsedInput, parsedOutput;
+ try {
+ parsedInput = JSON.parse(tool.input);
+ }
+ catch {
+ parsedInput = tool.input;
+ }
+ try {
+ parsedOutput = JSON.parse(tool.output);
+ }
+ catch {
+ parsedOutput = tool.output;
+ }
+ services.clarkProfiler
+ .getProfiler()
+ .addInstantEvent(`Tool Call: ${tool.toolName}`, "llm", {
+ step: stepCount,
+ toolIndex: idx + 1,
+ toolName: tool.toolName,
+ input: parsedInput,
+ output: parsedOutput,
+ inputSize: tool.input.length,
+ outputSize: tool.output.length,
+ });
+ });
+ }
+ // Record reasoning messages AFTER tool calls
+ if (step.reasoning) {
+ const reasoningLines = [
+ "[REASONING]",
+ ...step.reasoning.map(({ text }) => text),
+ "",
+ ];
+ logRef.content += reasoningLines.join("\n");
+ const reasoningText = step.reasoning
+ .map(({ text }) => text)
+ .join("");
+ if (reasoningText) {
+ try {
+ await services.chatSessionStore.recordAssistant({
+ type: "reasoning",
+ text: reasoningText,
+ ...(apiGroup && { group: apiGroup }),
+ });
+ }
+ catch (error) {
+ getLogger().error("Failed to record message from LLM", getErrorMeta(error));
+ }
+ }
+ }
+ if (step.text && textSpanActive) {
+ const textTrack = "text_generation";
  services.clarkProfiler
  .getProfiler()
- .addInstantEvent(`Tool Call: ${tool.toolName}`, "llm", {
- step: stepCount,
- toolIndex: idx + 1,
- toolName: tool.toolName,
- input: parsedInput,
- output: parsedOutput,
- inputSize: tool.input.length,
- outputSize: tool.output.length,
+ .updateActiveFrameArgs(textTrack, {
+ completeTextContent: currentStepTextDeltas.join(""),
+ finalText: step.text,
+ textLength: step.text.length,
+ stepComplete: true,
  });
- });
- }
- // Record reasoning messages AFTER tool calls
- if (step.reasoning) {
- const reasoningLines = [
- "[REASONING]",
- ...step.reasoning.map(({ text }) => text),
- "",
- ];
- logRef.content += reasoningLines.join("\n");
- const reasoningText = step.reasoning
- .map(({ text }) => text)
- .join("");
- if (reasoningText) {
+ services.clarkProfiler.getProfiler().endFrame(textTrack);
+ textSpanActive = false;
+ currentStepTextDeltas = [];
+ }
+ // Record text messages AFTER tool calls and reasoning
+ if (step.text) {
+ logRef.content += `[ASSISTANT TEXT] ${step.text}\n`;
  try {
  await services.chatSessionStore.recordAssistant({
- type: "reasoning",
- text: reasoningText,
+ type: "text",
+ text: step.text,
  ...(apiGroup && { group: apiGroup }),
  });
  }
@@ -340,126 +381,102 @@ export const doLLMGenerating = (clark, services) => {
  getLogger().error("Failed to record message from LLM", getErrorMeta(error));
  }
  }
- }
- if (step.text && textSpanActive) {
- const textTrack = "text_generation";
- services.clarkProfiler
- .getProfiler()
- .updateActiveFrameArgs(textTrack, {
- completeTextContent: currentStepTextDeltas.join(""),
- finalText: step.text,
- textLength: step.text.length,
- stepComplete: true,
+ logRef.content += `\n`;
+ },
+ onFinish: (result) => {
+ context.endTurn(result.totalUsage);
+ },
+ }, clark.tracer, clark.logger);
+ for await (const chunk of build.fullStream) {
+ await processStreamChunk(chunk, clark, logRef);
+ }
+ if (firstTokenReceived) {
+ services.clarkProfiler.endFrame();
+ }
+ services.clarkProfiler.endFrame();
+ if (thinkingSpanActive) {
+ services.clarkProfiler.getProfiler().endFrame("thinking");
+ }
+ if (textSpanActive) {
+ services.clarkProfiler.getProfiler().endFrame("text_generation");
+ }
+ const endTimestamp = new Date().toISOString();
+ logRef.content += `=== LLM CONVERSATION END [${conversationId}] ===\n`;
+ logRef.content += `End Timestamp: ${endTimestamp}\n`;
+ logRef.content += `Total Steps: ${stepCount}\n`;
+ // Log final token usage summary using accumulated totals
+ const finalTotalTokens = totalInputTokens + totalOutputTokens;
+ logRef.content += `[TOTAL TOKEN USAGE] Input: ${totalInputTokens}, Output: ${totalOutputTokens}, Total: ${finalTotalTokens}`;
+ if (totalCachedTokens > 0) {
+ logRef.content += `, Cached: ${totalCachedTokens}`;
+ }
+ logRef.content += `\n`;
+ try {
+ // Create TokenRequestData object from accumulated step data (not AI SDK usage which is only final step)
+ const requestTokenData = {
+ requestId: conversationId.toString(),
+ inputTokens: totalInputTokens,
+ outputTokens: totalOutputTokens,
+ totalTokens: totalInputTokens + totalOutputTokens,
+ cachedInputTokens: totalCachedTokens,
+ model: model.modelId,
+ startTime: startTimestamp,
+ endTime: endTimestamp,
+ };
+ await clark.context.peer?.call.aiPushTokenUsage(requestTokenData);
+ }
+ catch (error) {
+ // Token tracking is non-critical - log error but don't fail the AI request
+ getLogger().warn("Failed to send token usage data", error instanceof Error ? error.message : String(error));
+ }
+ // Save the complete log using saveGeneratedArtifact
+ try {
+ const logArtifact = {
+ type: "file",
+ filePath: `llm-conversation-${conversationId}.log`,
+ content: logRef.content,
+ };
+ const stepId = `llm-conversation-${conversationId}`;
+ await services.appShell.saveGeneratedArtifact(logArtifact, stepId, runTimestamp);
+ getLogger().debug("LLM conversation log saved");
+ }
+ catch (error) {
+ getLogger().error("Failed to save LLM conversation log", {
+ error: {
+ kind: "SaveLogError",
+ message: error instanceof Error ? error.message : String(error),
+ stack: error instanceof Error ? error.stack : undefined,
+ },
+ });
+ }
+ // Check if there are local draft changes
+ const hasLocalDraft = await services.draftInterface.hasLocalDraftChanges();
+ // In PLAN mode, only proceed to draft state if:
+ // 1. User has approved the plan (userApprovedPlan === true)
+ // 2. There are actual file changes (hasLocalDraft === true)
+ if (mode === AiMode.PLAN) {
+ if (!userApprovedPlan || !hasLocalDraft) {
+ void transitionTo({
+ type: APP_RUNTIME_UPDATED_WITHOUT_EDITS,
  });
- services.clarkProfiler.getProfiler().endFrame(textTrack);
- textSpanActive = false;
- currentStepTextDeltas = [];
- }
- // Record text messages AFTER tool calls and reasoning
- if (step.text) {
- logRef.content += `[ASSISTANT TEXT] ${step.text}\n`;
- try {
- await services.chatSessionStore.recordAssistant({
- type: "text",
- text: step.text,
- ...(apiGroup && { group: apiGroup }),
- });
- }
- catch (error) {
- getLogger().error("Failed to record message from LLM", getErrorMeta(error));
- }
+ return;
  }
- logRef.content += `\n`;
- },
- onFinish: (result) => {
- context.endTurn(result.totalUsage);
- },
- }, clark.tracer, clark.logger);
- for await (const chunk of build.fullStream) {
- await processStreamChunk(chunk, clark, logRef);
- }
- if (firstTokenReceived) {
- services.clarkProfiler.endFrame();
- }
- services.clarkProfiler.endFrame();
- if (thinkingSpanActive) {
- services.clarkProfiler.getProfiler().endFrame("thinking");
- }
- if (textSpanActive) {
- services.clarkProfiler.getProfiler().endFrame("text_generation");
- }
- const endTimestamp = new Date().toISOString();
- logRef.content += `=== LLM CONVERSATION END [${conversationId}] ===\n`;
- logRef.content += `End Timestamp: ${endTimestamp}\n`;
- logRef.content += `Total Steps: ${stepCount}\n`;
- // Log final token usage summary using accumulated totals
- const finalTotalTokens = totalInputTokens + totalOutputTokens;
- logRef.content += `[TOTAL TOKEN USAGE] Input: ${totalInputTokens}, Output: ${totalOutputTokens}, Total: ${finalTotalTokens}`;
- if (totalCachedTokens > 0) {
- logRef.content += `, Cached: ${totalCachedTokens}`;
- }
- logRef.content += `\n`;
- try {
- // Create TokenRequestData object from accumulated step data (not AI SDK usage which is only final step)
- const requestTokenData = {
- requestId: conversationId.toString(),
- inputTokens: totalInputTokens,
- outputTokens: totalOutputTokens,
- totalTokens: totalInputTokens + totalOutputTokens,
- cachedInputTokens: totalCachedTokens,
- model: model.modelId,
- startTime: startTimestamp,
- endTime: endTimestamp,
- };
- await clark.context.peer?.call.aiPushTokenUsage(requestTokenData);
- }
- catch (error) {
- // Token tracking is non-critical - log error but don't fail the AI request
- getLogger().warn("Failed to send token usage data", error instanceof Error ? error.message : String(error));
- }
- // Save the complete log using saveGeneratedArtifact
- try {
- const logArtifact = {
- type: "file",
- filePath: `llm-conversation-${conversationId}.log`,
- content: logRef.content,
- };
- const stepId = `llm-conversation-${conversationId}`;
- await services.appShell.saveGeneratedArtifact(logArtifact, stepId, runTimestamp);
- getLogger().debug("LLM conversation log saved");
- }
- catch (error) {
- getLogger().error("Failed to save LLM conversation log", {
- error: {
- kind: "SaveLogError",
- message: error instanceof Error ? error.message : String(error),
- stack: error instanceof Error ? error.stack : undefined,
- },
- });
- }
- // Check if there are local draft changes
- const hasLocalDraft = await services.draftInterface.hasLocalDraftChanges();
- // In PLAN mode, only proceed to draft state if:
- // 1. User has approved the plan (userApprovedPlan === true)
- // 2. There are actual file changes (hasLocalDraft === true)
- if (mode === AiMode.PLAN) {
- if (!userApprovedPlan || !hasLocalDraft) {
+ // If plan is approved and there are changes, fall through to create draft
+ }
+ if (hasLocalDraft) {
+ void transitionTo({
+ type: V3_AGENT_FINISHED,
+ });
+ }
+ else {
  void transitionTo({
  type: APP_RUNTIME_UPDATED_WITHOUT_EDITS,
  });
- return;
  }
- // If plan is approved and there are changes, fall through to create draft
  }
- if (hasLocalDraft) {
- void transitionTo({
- type: V3_AGENT_FINISHED,
- });
- }
- else {
- void transitionTo({
- type: APP_RUNTIME_UPDATED_WITHOUT_EDITS,
- });
+ finally {
+ // Always release the context lock when done
+ contextHandle.release();
  }
  }
  }