@superblocksteam/vite-plugin-file-sync 2.0.43-next.13 → 2.0.43-next.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (192)
  1. package/dist/ai-service/agent/subagents/apis/api-executor.d.ts +57 -0
  2. package/dist/ai-service/agent/subagents/apis/api-executor.d.ts.map +1 -0
  3. package/dist/ai-service/agent/subagents/apis/api-executor.js +284 -0
  4. package/dist/ai-service/agent/subagents/apis/api-executor.js.map +1 -0
  5. package/dist/ai-service/agent/subagents/apis/context.d.ts +12 -0
  6. package/dist/ai-service/agent/subagents/apis/context.d.ts.map +1 -0
  7. package/dist/ai-service/agent/subagents/apis/context.js +18 -0
  8. package/dist/ai-service/agent/subagents/apis/context.js.map +1 -0
  9. package/dist/ai-service/agent/subagents/apis/generate-api-source.d.ts +37 -31
  10. package/dist/ai-service/agent/subagents/apis/generate-api-source.d.ts.map +1 -1
  11. package/dist/ai-service/agent/subagents/apis/generate-api-source.js +355 -479
  12. package/dist/ai-service/agent/subagents/apis/generate-api-source.js.map +1 -1
  13. package/dist/ai-service/agent/subagents/apis/state.d.ts +40 -0
  14. package/dist/ai-service/agent/subagents/apis/state.d.ts.map +1 -0
  15. package/dist/ai-service/agent/subagents/apis/state.js +25 -0
  16. package/dist/ai-service/agent/subagents/apis/state.js.map +1 -0
  17. package/dist/ai-service/agent/subagents/apis/types.d.ts +5 -0
  18. package/dist/ai-service/agent/subagents/apis/types.d.ts.map +1 -0
  19. package/dist/ai-service/agent/subagents/apis/types.js +2 -0
  20. package/dist/ai-service/agent/subagents/apis/types.js.map +1 -0
  21. package/dist/ai-service/agent/tool-message-utils.d.ts.map +1 -1
  22. package/dist/ai-service/agent/tool-message-utils.js +8 -24
  23. package/dist/ai-service/agent/tool-message-utils.js.map +1 -1
  24. package/dist/ai-service/agent/tools/apis/build-api.d.ts +20 -0
  25. package/dist/ai-service/agent/tools/apis/build-api.d.ts.map +1 -0
  26. package/dist/ai-service/agent/tools/apis/build-api.js +168 -0
  27. package/dist/ai-service/agent/tools/apis/build-api.js.map +1 -0
  28. package/dist/ai-service/agent/tools/apis/finalize-api.d.ts +15 -0
  29. package/dist/ai-service/agent/tools/apis/finalize-api.d.ts.map +1 -0
  30. package/dist/ai-service/agent/tools/apis/finalize-api.js +103 -0
  31. package/dist/ai-service/agent/tools/apis/finalize-api.js.map +1 -0
  32. package/dist/ai-service/agent/tools/build-finalize.d.ts +1 -22
  33. package/dist/ai-service/agent/tools/build-finalize.d.ts.map +1 -1
  34. package/dist/ai-service/agent/tools/build-finalize.js +27 -18
  35. package/dist/ai-service/agent/tools/build-finalize.js.map +1 -1
  36. package/dist/ai-service/agent/tools/integrations/execute-request.d.ts +1 -1
  37. package/dist/ai-service/agent/tools/integrations/execute-request.d.ts.map +1 -1
  38. package/dist/ai-service/agent/tools/integrations/index.d.ts +1 -1
  39. package/dist/ai-service/agent/tools/integrations/index.d.ts.map +1 -1
  40. package/dist/ai-service/agent/tools/integrations/index.js +1 -1
  41. package/dist/ai-service/agent/tools/integrations/index.js.map +1 -1
  42. package/dist/ai-service/agent/tools/integrations/internal.d.ts +1 -1
  43. package/dist/ai-service/agent/tools/integrations/internal.d.ts.map +1 -1
  44. package/dist/ai-service/agent/tools/integrations/internal.js +12 -1
  45. package/dist/ai-service/agent/tools/integrations/internal.js.map +1 -1
  46. package/dist/ai-service/agent/tools/integrations/metadata.d.ts +1 -1
  47. package/dist/ai-service/agent/tools/integrations/metadata.d.ts.map +1 -1
  48. package/dist/ai-service/agent/tools/integrations/metadata.js +1 -1
  49. package/dist/ai-service/agent/tools/integrations/metadata.js.map +1 -1
  50. package/dist/ai-service/agent/tools/integrations/run-code.d.ts +1 -1
  51. package/dist/ai-service/agent/tools/integrations/run-code.d.ts.map +1 -1
  52. package/dist/ai-service/agent/tools/study-current-app-state.d.ts +1 -0
  53. package/dist/ai-service/agent/tools/study-current-app-state.d.ts.map +1 -1
  54. package/dist/ai-service/agent/tools2/access-control.d.ts.map +1 -1
  55. package/dist/ai-service/agent/tools2/access-control.js +5 -2
  56. package/dist/ai-service/agent/tools2/access-control.js.map +1 -1
  57. package/dist/ai-service/agent/tools2/registry.d.ts +2 -1
  58. package/dist/ai-service/agent/tools2/registry.d.ts.map +1 -1
  59. package/dist/ai-service/agent/tools2/registry.js +4 -4
  60. package/dist/ai-service/agent/tools2/registry.js.map +1 -1
  61. package/dist/ai-service/agent/tools2/types.d.ts +17 -3
  62. package/dist/ai-service/agent/tools2/types.d.ts.map +1 -1
  63. package/dist/ai-service/agent/tools2/types.js +21 -0
  64. package/dist/ai-service/agent/tools2/types.js.map +1 -1
  65. package/dist/ai-service/agent/utils.d.ts +1 -0
  66. package/dist/ai-service/agent/utils.d.ts.map +1 -1
  67. package/dist/ai-service/agent/utils.js +1 -0
  68. package/dist/ai-service/agent/utils.js.map +1 -1
  69. package/dist/ai-service/integrations/metadata/database.d.ts.map +1 -1
  70. package/dist/ai-service/integrations/metadata/database.js +61 -20
  71. package/dist/ai-service/integrations/metadata/database.js.map +1 -1
  72. package/dist/ai-service/integrations/metadata/databricks.d.ts.map +1 -1
  73. package/dist/ai-service/integrations/metadata/databricks.js +5 -5
  74. package/dist/ai-service/integrations/metadata/databricks.js.map +1 -1
  75. package/dist/ai-service/integrations/metadata/graphql-based.d.ts +2 -0
  76. package/dist/ai-service/integrations/metadata/graphql-based.d.ts.map +1 -1
  77. package/dist/ai-service/integrations/metadata/graphql-based.js +95 -14
  78. package/dist/ai-service/integrations/metadata/graphql-based.js.map +1 -1
  79. package/dist/ai-service/integrations/metadata/llm-utils.d.ts +24 -0
  80. package/dist/ai-service/integrations/metadata/llm-utils.d.ts.map +1 -0
  81. package/dist/ai-service/integrations/metadata/llm-utils.js +45 -0
  82. package/dist/ai-service/integrations/metadata/llm-utils.js.map +1 -0
  83. package/dist/ai-service/integrations/store.d.ts +5 -5
  84. package/dist/ai-service/integrations/store.d.ts.map +1 -1
  85. package/dist/ai-service/integrations/store.js +52 -53
  86. package/dist/ai-service/integrations/store.js.map +1 -1
  87. package/dist/ai-service/llm/context/constants.d.ts +7 -6
  88. package/dist/ai-service/llm/context/constants.d.ts.map +1 -1
  89. package/dist/ai-service/llm/context/constants.js +7 -6
  90. package/dist/ai-service/llm/context/constants.js.map +1 -1
  91. package/dist/ai-service/llm/context/context-handle.d.ts +106 -0
  92. package/dist/ai-service/llm/context/context-handle.d.ts.map +1 -0
  93. package/dist/ai-service/llm/context/context-handle.js +134 -0
  94. package/dist/ai-service/llm/context/context-handle.js.map +1 -0
  95. package/dist/ai-service/llm/context/context-lock.d.ts +144 -0
  96. package/dist/ai-service/llm/context/context-lock.d.ts.map +1 -0
  97. package/dist/ai-service/llm/context/context-lock.js +221 -0
  98. package/dist/ai-service/llm/context/context-lock.js.map +1 -0
  99. package/dist/ai-service/llm/context/context.d.ts +18 -19
  100. package/dist/ai-service/llm/context/context.d.ts.map +1 -1
  101. package/dist/ai-service/llm/context/context.js +78 -129
  102. package/dist/ai-service/llm/context/context.js.map +1 -1
  103. package/dist/ai-service/llm/context/index.d.ts +4 -0
  104. package/dist/ai-service/llm/context/index.d.ts.map +1 -1
  105. package/dist/ai-service/llm/context/index.js +5 -0
  106. package/dist/ai-service/llm/context/index.js.map +1 -1
  107. package/dist/ai-service/llm/context/internal-types.d.ts +0 -2
  108. package/dist/ai-service/llm/context/internal-types.d.ts.map +1 -1
  109. package/dist/ai-service/llm/context/internal-types.js.map +1 -1
  110. package/dist/ai-service/llm/context/levels/l1.d.ts.map +1 -1
  111. package/dist/ai-service/llm/context/levels/l1.js +3 -5
  112. package/dist/ai-service/llm/context/levels/l1.js.map +1 -1
  113. package/dist/ai-service/llm/context/manager.d.ts +60 -11
  114. package/dist/ai-service/llm/context/manager.d.ts.map +1 -1
  115. package/dist/ai-service/llm/context/manager.js +113 -37
  116. package/dist/ai-service/llm/context/manager.js.map +1 -1
  117. package/dist/ai-service/llm/context/utils/content-compaction.d.ts +2 -2
  118. package/dist/ai-service/llm/context/utils/content-compaction.d.ts.map +1 -1
  119. package/dist/ai-service/llm/context/utils/content-compaction.js +6 -3
  120. package/dist/ai-service/llm/context/utils/content-compaction.js.map +1 -1
  121. package/dist/ai-service/llm/context/utils/index.d.ts +1 -1
  122. package/dist/ai-service/llm/context/utils/index.d.ts.map +1 -1
  123. package/dist/ai-service/llm/context/utils/index.js +1 -1
  124. package/dist/ai-service/llm/context/utils/index.js.map +1 -1
  125. package/dist/ai-service/llm/context/utils/message-utils.d.ts +17 -7
  126. package/dist/ai-service/llm/context/utils/message-utils.d.ts.map +1 -1
  127. package/dist/ai-service/llm/context/utils/message-utils.js +31 -18
  128. package/dist/ai-service/llm/context/utils/message-utils.js.map +1 -1
  129. package/dist/ai-service/llmobs/middleware/stream-text.d.ts.map +1 -1
  130. package/dist/ai-service/llmobs/middleware/stream-text.js +1 -0
  131. package/dist/ai-service/llmobs/middleware/stream-text.js.map +1 -1
  132. package/dist/ai-service/llmobs/tracer.d.ts +4 -0
  133. package/dist/ai-service/llmobs/tracer.d.ts.map +1 -1
  134. package/dist/ai-service/llmobs/tracer.js +11 -0
  135. package/dist/ai-service/llmobs/tracer.js.map +1 -1
  136. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/ButtonPropsDocs.js +1 -1
  137. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/CheckboxPropsDocs.js +1 -1
  138. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/ColumnPropsDocs.js +1 -1
  139. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/ContainerPropsDocs.js +1 -1
  140. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/DatePickerPropsDocs.js +1 -1
  141. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/DropdownPropsDocs.js +1 -1
  142. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/IconPropsDocs.js +1 -1
  143. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/ImagePropsDocs.js +1 -1
  144. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/InputPropsDocs.js +1 -1
  145. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/ModalPropsDocs.js +1 -1
  146. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/PagePropsDocs.js +1 -1
  147. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/SectionPropsDocs.js +1 -1
  148. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/SlideoutPropsDocs.js +1 -1
  149. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/SwitchPropsDocs.js +1 -1
  150. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/TablePropsDocs.js +1 -1
  151. package/dist/ai-service/prompt-builder-service/static-fragments/library-components/TextPropsDocs.js +1 -1
  152. package/dist/ai-service/prompt-builder-service/static-fragments/library-typedefs/Dim.js +1 -1
  153. package/dist/ai-service/prompt-builder-service/static-fragments/library-typedefs/EventFlow.js +1 -1
  154. package/dist/ai-service/prompt-builder-service/static-fragments/library-typedefs/TextStyleWithVariant.js +1 -1
  155. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/full-examples.js +1 -1
  156. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-api.js +1 -1
  157. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-components-rules.js +1 -1
  158. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-custom-components.js +1 -1
  159. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-data-filtering.js +1 -1
  160. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-event-flow.js +1 -1
  161. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-forms.js +1 -1
  162. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-layouts.js +1 -1
  163. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-page.js +1 -1
  164. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-rbac.js +1 -1
  165. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-routes.js +1 -1
  166. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-state.js +1 -1
  167. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/superblocks-theming-chakra-new.js +1 -1
  168. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/system-base.js +1 -1
  169. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/system-incremental.js +1 -1
  170. package/dist/ai-service/prompt-builder-service/static-fragments/platform-parts/system-specific-edit.js +1 -1
  171. package/dist/ai-service/state-machine/clark-fsm.d.ts +2 -0
  172. package/dist/ai-service/state-machine/clark-fsm.d.ts.map +1 -1
  173. package/dist/ai-service/state-machine/clark-fsm.js.map +1 -1
  174. package/dist/ai-service/state-machine/handlers/llm-generating.d.ts.map +1 -1
  175. package/dist/ai-service/state-machine/handlers/llm-generating.js +363 -336
  176. package/dist/ai-service/state-machine/handlers/llm-generating.js.map +1 -1
  177. package/dist/ai-service/util/stop-condition.d.ts +4 -1
  178. package/dist/ai-service/util/stop-condition.d.ts.map +1 -1
  179. package/dist/ai-service/util/stop-condition.js +14 -2
  180. package/dist/ai-service/util/stop-condition.js.map +1 -1
  181. package/dist/sync-service/download.d.ts.map +1 -1
  182. package/dist/sync-service/download.js +28 -7
  183. package/dist/sync-service/download.js.map +1 -1
  184. package/dist/util/logger.d.ts +13 -0
  185. package/dist/util/logger.d.ts.map +1 -1
  186. package/dist/util/logger.js +21 -0
  187. package/dist/util/logger.js.map +1 -1
  188. package/package.json +11 -9
  189. package/dist/ai-service/llm/context/logger.d.ts +0 -17
  190. package/dist/ai-service/llm/context/logger.d.ts.map +0 -1
  191. package/dist/ai-service/llm/context/logger.js +0 -26
  192. package/dist/ai-service/llm/context/logger.js.map +0 -1
@@ -1,5 +1,5 @@
  import { AiMode, } from "@superblocksteam/library-shared/types";
- import { hasToolCall, smoothStream, } from "ai";
+ import { smoothStream, } from "ai";
  import { getErrorMeta, getLogger } from "../../../util/logger.js";
  import { buildBaseSystemPrompt } from "../../agent/prompts/build-base-system-prompt.js";
  import { getToolCallArguments } from "../../agent/tool-message-utils.js";
@@ -46,11 +46,21 @@ function formatSummaryForAgents(latestSummary) {
  const buildUserMessage = (userPrompt, promptContext, mode, userApprovedPlan) => {
  const content = [];
  const focusedEntities = promptContext?.entities;
+ const selectedIntegrations = promptContext?.integrations;
+ // Add focused entities if present
  if (focusedEntities?.length) {
  content.push({
  type: "text",
- text: `<focused_entities>\nThe user has focused the editor on the following entities corresponding to the current app state. Attempt to constrain your actions to affect only these entities.\n${safeJsonStringify(focusedEntities)}\n</focused_entities>\n\n` +
- userPrompt,
+ text: `<focused_entities>\nThe user has focused the editor on the following entities corresponding to the current app state. Attempt to constrain your actions to affect only these entities.\n${safeJsonStringify(focusedEntities)}\n</focused_entities>\n\n`,
+ });
+ }
+ // Add selected integrations if present
+ if (selectedIntegrations?.length) {
+ // Omit metadata to reduce token usage - only include essential identifying information
+ const integrationsWithoutMetadata = selectedIntegrations.map(({ id, name, type }) => ({ id, name, type }));
+ content.push({
+ type: "text",
+ text: `<selected_integrations>\nThe user has explicitly selected the following integration configurations to use. You MUST use these specific integrations when creating or modifying APIs.\n${safeJsonStringify(integrationsWithoutMetadata)}\n</selected_integrations>\n\n`,
  });
  }
  if (promptContext?.attachments?.length) {
@@ -95,244 +105,285 @@ export const doLLMGenerating = (clark, services) => {
  const { abortController } = clark.context;
  const contextId = getContextId(clark, services);
  const contextOptions = clark.context.llmConfig?.contextOptions;
- const context = await services.contextManager.getContext(contextId, contextOptions);
- const mode = clark.context.currentMode;
- const userApprovedPlan = clark.context.userApprovedPlan;
- await chatSessionStore.getMessages();
- const latestSummary = await chatSessionStore.getLatestSummary();
- if (latestSummary) {
- context.seedIfEmpty({
- role: "user",
- content: `Reference the context from the previous exchange with the user, if relevant:\n\n${formatSummaryForAgents(latestSummary)}`,
- });
- }
- const systemPrompt = buildBaseSystemPrompt(mode);
- const system = {
- role: "system",
- content: systemPrompt,
- };
- context.setSystemPrompt(system);
- const prompt = buildUserMessage(userPrompt, promptContext, mode, userApprovedPlan);
- context.startTurn(prompt);
- const messages = context.getMessages();
- const model = services.llmProvider.modelForClassification("broad_edit");
- // Process LLM configuration up front so we can log it once at the top
- const llmConfig = clark.context.llmConfig;
- const disabledTools = clark.context.llmConfig?.disabledTools;
- const { headers, thinkingEnabled, thinkingBudgetTokens, interleavedThinking, providerOptions, } = processLLMConfig(llmConfig, 5000, // default budget tokens
- `LLM Generating (model=${model.modelId}, disabledTools=${disabledTools?.length ? disabledTools.join(",") : "none"})`);
- const conversationId = Date.now();
- const startTimestamp = new Date().toISOString();
- const runTimestamp = clark.context.runTimestamp || startTimestamp;
- // Initialize log content
- const logRef = {
- content: `=== LLM CONVERSATION START [${conversationId}] ===\n`,
- };
- logRef.content += `Timestamp: ${startTimestamp}\n`;
- logRef.content += `Model: ${model.modelId}\n`;
- // Log AI provider config
- logRef.content += `Provider Config: provider=${llmConfig?.provider || "anthropic (default)"}, thinking=${thinkingEnabled}, budget=${thinkingBudgetTokens}, interleaved=${interleavedThinking}`;
- if (disabledTools?.length) {
- logRef.content += `, disabledTools=${disabledTools.join(",")}`;
- }
- if (headers && Object.keys(headers).length > 0) {
- logRef.content += `, headers=${JSON.stringify(headers)}`;
- }
- logRef.content += `--- INPUT MESSAGES ---\n`;
- logRef.content += `[SYSTEM] ${systemPrompt}\n\n`;
- logRef.content += `[${system.role.toUpperCase()}] ${system.content}\n\n`;
- // Note: Input token count will be logged once LLM call begins
- // buildTools now returns filtered tools based on mode via registry
- const actualTools = await buildTools(clark, services, promptContext, logRef, disabledTools, mode);
- logRef.content += `Mode: ${mode}, Available tools: ${Object.keys(actualTools).length}\n`;
- logRef.content += `Tools: ${Object.keys(actualTools).join(", ")}\n\n`;
- let stepCount = 0;
- // Track cumulative token usage across all steps
- let totalInputTokens = 0;
- let totalOutputTokens = 0;
- let totalCachedTokens = 0;
- services.clarkProfiler.startLLMWaiting({
- messages: messages,
- model: model.modelId,
- });
- let firstTokenReceived = false;
- let thinkingSpanActive = false;
- let textSpanActive = false;
- const allReasoningDeltas = [];
- let currentStepReasoningDeltas = [];
- let currentStepTextDeltas = [];
- getLogger().info(`CURRENT_MODE: ${mode}`);
- const build = tracedStreamText({
- abortSignal: abortController?.signal,
- model,
- providerOptions,
- headers,
- experimental_transform: [smoothStream({ chunking: "line" })],
- messages,
- tools: actualTools,
- prepareStep: async (step) => {
- context.startStep();
- const messages = context.getMessages();
- return { ...step, messages };
+ // Generate a unique owner ID for this LLM generation session
+ const ownerId = `llm-generating-${Date.now()}-${Math.random().toString(36).slice(2, 9)}`;
+ // Acquire exclusive context access
+ const contextHandle = await services.contextManager.acquireContext(contextId, ownerId, {
+ contextOptions,
+ acquireOptions: {
+ waitTimeoutMs: 30000, // Wait up to 30 seconds for context
+ lockTimeoutMs: 600000, // Hold lock for up to 10 minutes
+ retryIntervalMs: 100,
  },
- stopWhen: [
- hasToolCall("build_finalize"),
- hasToolSuccess("exitPlanMode"),
- hasToolSuccess("askMultiChoice"),
- ],
- onChunk: (chunkData) => {
- if (!firstTokenReceived) {
- firstTokenReceived = true;
- services.clarkProfiler.startLLMStreaming({
- firstChunk: chunkData.chunk.type === "text-delta"
- ? chunkData.chunk.text
- : `[${chunkData.chunk.type}]`,
- });
- }
- if (chunkData.chunk.type === "reasoning-delta") {
- const thinkingTrack = "thinking";
- services.clarkProfiler
- .getProfiler()
- .createTrack(thinkingTrack, "AI Thinking/Reasoning", "llm");
- allReasoningDeltas.push(chunkData.chunk.text);
- currentStepReasoningDeltas.push(chunkData.chunk.text);
- if (!thinkingSpanActive) {
- thinkingSpanActive = true;
- services.clarkProfiler
- .getProfiler()
- .startFrame(`Thinking Step ${stepCount + 1}`, thinkingTrack, {
- stepNumber: stepCount + 1,
- chunkType: chunkData.chunk.type,
+ });
+ try {
+ const context = contextHandle.context;
+ const mode = clark.context.currentMode;
+ const userApprovedPlan = clark.context.userApprovedPlan;
+ await chatSessionStore.getMessages();
+ const latestSummary = await chatSessionStore.getLatestSummary();
+ if (latestSummary) {
+ context.seedIfEmpty({
+ role: "user",
+ content: `Reference the context from the previous exchange with the user, if relevant:\n\n${formatSummaryForAgents(latestSummary)}`,
+ });
+ }
+ const systemPrompt = buildBaseSystemPrompt(mode);
+ const system = {
+ role: "system",
+ content: systemPrompt,
+ };
+ context.setSystemPrompt(system);
+ const prompt = buildUserMessage(userPrompt, promptContext, mode, userApprovedPlan);
+ context.startTurn(prompt);
+ const messages = context.getMessages();
+ const model = services.llmProvider.modelForClassification("broad_edit");
+ // Process LLM configuration up front so we can log it once at the top
+ const llmConfig = clark.context.llmConfig;
+ const disabledTools = clark.context.llmConfig?.disabledTools;
+ const { headers, thinkingEnabled, thinkingBudgetTokens, interleavedThinking, providerOptions, } = processLLMConfig(llmConfig, 5000, // default budget tokens
+ `LLM Generating (model=${model.modelId}, disabledTools=${disabledTools?.length ? disabledTools.join(",") : "none"})`);
+ const conversationId = Date.now();
+ const startTimestamp = new Date().toISOString();
+ const runTimestamp = clark.context.runTimestamp || startTimestamp;
+ // Initialize log content
+ const logRef = {
+ content: `=== LLM CONVERSATION START [${conversationId}] ===\n`,
+ };
+ logRef.content += `Timestamp: ${startTimestamp}\n`;
+ logRef.content += `Model: ${model.modelId}\n`;
+ // Log AI provider config
+ logRef.content += `Provider Config: provider=${llmConfig?.provider || "anthropic (default)"}, thinking=${thinkingEnabled}, budget=${thinkingBudgetTokens}, interleaved=${interleavedThinking}`;
+ if (disabledTools?.length) {
+ logRef.content += `, disabledTools=${disabledTools.join(",")}`;
+ }
+ if (headers && Object.keys(headers).length > 0) {
+ logRef.content += `, headers=${JSON.stringify(headers)}`;
+ }
+ logRef.content += `--- INPUT MESSAGES ---\n`;
+ logRef.content += `[SYSTEM] ${systemPrompt}\n\n`;
+ logRef.content += `[${system.role.toUpperCase()}] ${system.content}\n\n`;
+ // Note: Input token count will be logged once LLM call begins
+ // buildTools now returns filtered tools based on mode via registry
+ const actualTools = await buildTools(clark, services, promptContext, logRef, disabledTools, mode);
+ logRef.content += `Mode: ${mode}, Available tools: ${Object.keys(actualTools).length}\n`;
+ logRef.content += `Tools: ${Object.keys(actualTools).join(", ")}\n\n`;
+ let stepCount = 0;
+ // Track cumulative token usage across all steps
+ let totalInputTokens = 0;
+ let totalOutputTokens = 0;
+ let totalCachedTokens = 0;
+ services.clarkProfiler.startLLMWaiting({
+ messages: messages,
+ model: model.modelId,
+ });
+ let firstTokenReceived = false;
+ let thinkingSpanActive = false;
+ let textSpanActive = false;
+ const allReasoningDeltas = [];
+ let currentStepReasoningDeltas = [];
+ let currentStepTextDeltas = [];
+ getLogger().info(`CURRENT_MODE: ${mode}`);
+ const build = tracedStreamText({
+ abortSignal: abortController?.signal,
+ model,
+ providerOptions,
+ headers,
+ experimental_transform: [smoothStream({ chunking: "line" })],
+ messages,
+ tools: actualTools,
+ prepareStep: async (step) => {
+ context.startStep();
+ const messages = context.getMessages();
+ return { ...step, messages };
+ },
+ stopWhen: [
+ hasToolSuccess("build_finalize"),
+ hasToolSuccess("exitPlanMode"),
+ hasToolSuccess("askMultiChoice"),
+ ],
+ onChunk: (chunkData) => {
+ if (!firstTokenReceived) {
+ firstTokenReceived = true;
+ services.clarkProfiler.startLLMStreaming({
+ firstChunk: chunkData.chunk.type === "text-delta"
+ ? chunkData.chunk.text
+ : `[${chunkData.chunk.type}]`,
  });
  }
- }
- if (chunkData.chunk.type === "text-delta") {
- const textTrack = "text_generation";
- services.clarkProfiler
- .getProfiler()
- .createTrack(textTrack, "Text Generation", "llm");
- currentStepTextDeltas.push(chunkData.chunk.text);
- if (!textSpanActive) {
- textSpanActive = true;
+ if (chunkData.chunk.type === "reasoning-delta") {
+ const thinkingTrack = "thinking";
  services.clarkProfiler
  .getProfiler()
- .startFrame(`Text Generation Step ${stepCount + 1}`, textTrack, {
- stepNumber: stepCount + 1,
- firstTextDelta: chunkData.chunk.text.slice(0, 50) +
- (chunkData.chunk.text.length > 50 ? "..." : ""),
- });
- }
- }
- },
- onStepFinish: async (step) => {
- stepCount++;
- const stepTimestamp = new Date().toISOString();
- context.endStep(step.response.messages, step.usage);
- logRef.content += `--- OUTPUT STEP ${stepCount} [${stepTimestamp}] ---\n`;
- if (step.reasoning && thinkingSpanActive) {
- const thinkingTrack = "thinking";
- services.clarkProfiler
- .getProfiler()
- .updateActiveFrameArgs(thinkingTrack, {
- completeReasoningText: currentStepReasoningDeltas.join(" "),
- reasoningLength: step.reasoning.length,
- stepComplete: true,
- });
- services.clarkProfiler.getProfiler().endFrame(thinkingTrack);
- thinkingSpanActive = false;
- currentStepReasoningDeltas = [];
- }
- // Log token usage for this step and accumulate totals
- if (step.usage) {
- const stepInputTokens = step.usage.inputTokens ?? 0;
- const stepOutputTokens = step.usage.outputTokens ?? 0;
- const stepCachedTokens = step.usage.cachedInputTokens ?? 0;
- // Accumulate totals
- totalInputTokens += stepInputTokens;
- totalOutputTokens += stepOutputTokens;
- totalCachedTokens += stepCachedTokens;
- logRef.content += `[TOKEN USAGE] Input: ${stepInputTokens}, Output: ${stepOutputTokens}, Total: ${step.usage.totalTokens ?? 0}`;
- if (stepCachedTokens) {
- logRef.content += `, Cached: ${stepCachedTokens}`;
+ .createTrack(thinkingTrack, "AI Thinking/Reasoning", "llm");
+ allReasoningDeltas.push(chunkData.chunk.text);
+ currentStepReasoningDeltas.push(chunkData.chunk.text);
+ if (!thinkingSpanActive) {
+ thinkingSpanActive = true;
+ services.clarkProfiler
+ .getProfiler()
+ .startFrame(`Thinking Step ${stepCount + 1}`, thinkingTrack, {
+ stepNumber: stepCount + 1,
+ chunkType: chunkData.chunk.type,
+ });
+ }
  }
- logRef.content += `\n`;
- }
- const apiToolCall = step.toolCalls.find((toolCall) => toolCall.toolName === "build_generateApiSource");
- const apiName = apiToolCall?.input?.apiName;
- const apiGroup = apiName ? `api-${apiName}` : undefined;
- // Record tool calls FIRST before other messages to ensure proper ordering
- const toolsCalled = step.content
- .filter((c) => c.type === "tool-result")
- .map((c) => ({
- toolName: c.toolName,
- input: JSON.stringify(c.input),
- output: JSON.stringify(c.output, null, 2),
- }));
- if (toolsCalled.length > 0) {
- logRef.content += `[TOOLS CALLED]\n`;
- toolsCalled.forEach((tool, idx) => {
- logRef.content += ` Tool ${idx + 1}: ${tool.toolName}\n`;
- logRef.content += ` Input: ${tool.input}\n`;
- logRef.content += ` Output: ${tool.output}\n`;
- });
- try {
- await Promise.all(step.toolCalls.map(async (toolCall) => {
- const args = await getToolCallArguments(toolCall.toolName, toolCall.input, clark);
- await services.chatSessionStore.recordAssistant({
- type: "tool",
- tool: toolCall.toolName,
- args: args,
- ...(apiGroup && { group: apiGroup }),
+ if (chunkData.chunk.type === "text-delta") {
+ const textTrack = "text_generation";
+ services.clarkProfiler
+ .getProfiler()
+ .createTrack(textTrack, "Text Generation", "llm");
+ currentStepTextDeltas.push(chunkData.chunk.text);
+ if (!textSpanActive) {
+ textSpanActive = true;
+ services.clarkProfiler
+ .getProfiler()
+ .startFrame(`Text Generation Step ${stepCount + 1}`, textTrack, {
+ stepNumber: stepCount + 1,
+ firstTextDelta: chunkData.chunk.text.slice(0, 50) +
+ (chunkData.chunk.text.length > 50 ? "..." : ""),
  });
- }));
+ }
  }
- catch (error) {
- getLogger().error("Failed to record tool calls", getErrorMeta(error));
+ },
+ onStepFinish: async (step) => {
+ stepCount++;
+ const stepTimestamp = new Date().toISOString();
+ context.endStep(step.response.messages, step.usage);
+ logRef.content += `--- OUTPUT STEP ${stepCount} [${stepTimestamp}] ---\n`;
+ if (step.reasoning && thinkingSpanActive) {
+ const thinkingTrack = "thinking";
+ services.clarkProfiler
+ .getProfiler()
+ .updateActiveFrameArgs(thinkingTrack, {
+ completeReasoningText: currentStepReasoningDeltas.join(" "),
+ reasoningLength: step.reasoning.length,
+ stepComplete: true,
+ });
+ services.clarkProfiler.getProfiler().endFrame(thinkingTrack);
+ thinkingSpanActive = false;
+ currentStepReasoningDeltas = [];
  }
- // Record profiling events
- toolsCalled.forEach((tool, idx) => {
- let parsedInput, parsedOutput;
- try {
- parsedInput = JSON.parse(tool.input);
- }
- catch {
- parsedInput = tool.input;
+ // Log token usage for this step and accumulate totals
+ if (step.usage) {
+ const stepInputTokens = step.usage.inputTokens ?? 0;
+ const stepOutputTokens = step.usage.outputTokens ?? 0;
+ const stepCachedTokens = step.usage.cachedInputTokens ?? 0;
+ // Accumulate totals
+ totalInputTokens += stepInputTokens;
+ totalOutputTokens += stepOutputTokens;
+ totalCachedTokens += stepCachedTokens;
+ logRef.content += `[TOKEN USAGE] Input: ${stepInputTokens}, Output: ${stepOutputTokens}, Total: ${step.usage.totalTokens ?? 0}`;
+ if (stepCachedTokens) {
+ logRef.content += `, Cached: ${stepCachedTokens}`;
  }
+ logRef.content += `\n`;
+ }
+ const apiToolCall = step.toolCalls.find((toolCall) => toolCall.toolName === "build_generateApiSource");
+ const apiName = apiToolCall?.input?.apiName;
+ const apiGroup = apiName ? `api-${apiName}` : undefined;
+ // Record tool calls FIRST before other messages to ensure proper ordering
+ const toolsCalled = step.content
+ .filter((c) => c.type === "tool-result")
+ .map((c) => ({
+ toolName: c.toolName,
+ input: JSON.stringify(c.input),
+ output: JSON.stringify(c.output, null, 2),
+ }));
+ if (toolsCalled.length > 0) {
+ logRef.content += `[TOOLS CALLED]\n`;
+ toolsCalled.forEach((tool, idx) => {
+ logRef.content += ` Tool ${idx + 1}: ${tool.toolName}\n`;
+ logRef.content += ` Input: ${tool.input}\n`;
+ logRef.content += ` Output: ${tool.output}\n`;
+ });
  try {
- parsedOutput = JSON.parse(tool.output);
+ await Promise.all(step.toolCalls.map(async (toolCall) => {
+ const args = await getToolCallArguments(toolCall.toolName, toolCall.input, clark);
+ await services.chatSessionStore.recordAssistant({
+ type: "tool",
+ tool: toolCall.toolName,
+ args: args,
+ ...(apiGroup && { group: apiGroup }),
+ });
+ }));
  }
- catch {
- parsedOutput = tool.output;
+ catch (error) {
+ getLogger().error("Failed to record tool calls", getErrorMeta(error));
  }
+ // Record profiling events
+ toolsCalled.forEach((tool, idx) => {
+ let parsedInput, parsedOutput;
+ try {
+ parsedInput = JSON.parse(tool.input);
+ }
+ catch {
+ parsedInput = tool.input;
+ }
+ try {
+ parsedOutput = JSON.parse(tool.output);
+ }
+ catch {
+ parsedOutput = tool.output;
+ }
+ services.clarkProfiler
+ .getProfiler()
+ .addInstantEvent(`Tool Call: ${tool.toolName}`, "llm", {
+ step: stepCount,
+ toolIndex: idx + 1,
+ toolName: tool.toolName,
+ input: parsedInput,
+ output: parsedOutput,
+ inputSize: tool.input.length,
+ outputSize: tool.output.length,
+ });
+ });
+ }
+ // Record reasoning messages AFTER tool calls
+ if (step.reasoning) {
+ const reasoningLines = [
+ "[REASONING]",
+ ...step.reasoning.map(({ text }) => text),
+ "",
+ ];
+ logRef.content += reasoningLines.join("\n");
+ const reasoningText = step.reasoning
+ .map(({ text }) => text)
+ .join("");
+ if (reasoningText) {
+ try {
+ await services.chatSessionStore.recordAssistant({
+ type: "reasoning",
+ text: reasoningText,
+ ...(apiGroup && { group: apiGroup }),
+ });
+ }
+ catch (error) {
+ getLogger().error("Failed to record message from LLM", getErrorMeta(error));
+ }
+ }
+ }
+ if (step.text && textSpanActive) {
+ const textTrack = "text_generation";
  services.clarkProfiler
  .getProfiler()
- .addInstantEvent(`Tool Call: ${tool.toolName}`, "llm", {
- step: stepCount,
- toolIndex: idx + 1,
- toolName: tool.toolName,
- input: parsedInput,
- output: parsedOutput,
- inputSize: tool.input.length,
- outputSize: tool.output.length,
+ .updateActiveFrameArgs(textTrack, {
+ completeTextContent: currentStepTextDeltas.join(""),
+ finalText: step.text,
+ textLength: step.text.length,
+ stepComplete: true,
  });
- });
- }
- // Record reasoning messages AFTER tool calls
- if (step.reasoning) {
- const reasoningLines = [
- "[REASONING]",
- ...step.reasoning.map(({ text }) => text),
- "",
- ];
- logRef.content += reasoningLines.join("\n");
- const reasoningText = step.reasoning
- .map(({ text }) => text)
- .join("");
- if (reasoningText) {
+ services.clarkProfiler.getProfiler().endFrame(textTrack);
+ textSpanActive = false;
+ currentStepTextDeltas = [];
+ }
+ // Record text messages AFTER tool calls and reasoning
+ if (step.text) {
+ logRef.content += `[ASSISTANT TEXT] ${step.text}\n`;
  try {
  await services.chatSessionStore.recordAssistant({
- type: "reasoning",
- text: reasoningText,
+ type: "text",
+ text: step.text,
  ...(apiGroup && { group: apiGroup }),
  });
  }
@@ -340,126 +391,102 @@ export const doLLMGenerating = (clark, services) => {
  getLogger().error("Failed to record message from LLM", getErrorMeta(error));
  }
  }
- }
- if (step.text && textSpanActive) {
- const textTrack = "text_generation";
- services.clarkProfiler
- .getProfiler()
- .updateActiveFrameArgs(textTrack, {
- completeTextContent: currentStepTextDeltas.join(""),
- finalText: step.text,
- textLength: step.text.length,
- stepComplete: true,
+ logRef.content += `\n`;
+ },
+ onFinish: (result) => {
+ context.endTurn(result.totalUsage);
+ },
+ }, clark.tracer, clark.logger);
+ for await (const chunk of build.fullStream) {
+ await processStreamChunk(chunk, clark, logRef);
+ }
+ if (firstTokenReceived) {
+ services.clarkProfiler.endFrame();
+ }
+ services.clarkProfiler.endFrame();
+ if (thinkingSpanActive) {
+ services.clarkProfiler.getProfiler().endFrame("thinking");
+ }
+ if (textSpanActive) {
+ services.clarkProfiler.getProfiler().endFrame("text_generation");
+ }
+ const endTimestamp = new Date().toISOString();
+ logRef.content += `=== LLM CONVERSATION END [${conversationId}] ===\n`;
+ logRef.content += `End Timestamp: ${endTimestamp}\n`;
+ logRef.content += `Total Steps: ${stepCount}\n`;
+ // Log final token usage summary using accumulated totals
+ const finalTotalTokens = totalInputTokens + totalOutputTokens;
+ logRef.content += `[TOTAL TOKEN USAGE] Input: ${totalInputTokens}, Output: ${totalOutputTokens}, Total: ${finalTotalTokens}`;
+ if (totalCachedTokens > 0) {
+ logRef.content += `, Cached: ${totalCachedTokens}`;
+ }
+ logRef.content += `\n`;
+ try {
+ // Create TokenRequestData object from accumulated step data (not AI SDK usage which is only final step)
+ const requestTokenData = {
+ requestId: conversationId.toString(),
+ inputTokens: totalInputTokens,
+ outputTokens: totalOutputTokens,
+ totalTokens: totalInputTokens + totalOutputTokens,
+ cachedInputTokens: totalCachedTokens,
+ model: model.modelId,
+ startTime: startTimestamp,
+ endTime: endTimestamp,
+ };
+ await clark.context.peer?.call.aiPushTokenUsage(requestTokenData);
+ }
+ catch (error) {
+ // Token tracking is non-critical - log error but don't fail the AI request
+ getLogger().warn("Failed to send token usage data", error instanceof Error ? error.message : String(error));
+ }
+ // Save the complete log using saveGeneratedArtifact
+ try {
+ const logArtifact = {
+ type: "file",
+ filePath: `llm-conversation-${conversationId}.log`,
+ content: logRef.content,
+ };
+ const stepId = `llm-conversation-${conversationId}`;
+ await services.appShell.saveGeneratedArtifact(logArtifact, stepId, runTimestamp);
+ getLogger().debug("LLM conversation log saved");
+ }
+ catch (error) {
+ getLogger().error("Failed to save LLM conversation log", {
+ error: {
+ kind: "SaveLogError",
+ message: error instanceof Error ? error.message : String(error),
+ stack: error instanceof Error ? error.stack : undefined,
+ },
+ });
+ }
+ // Check if there are local draft changes
+ const hasLocalDraft = await services.draftInterface.hasLocalDraftChanges();
+ // In PLAN mode, only proceed to draft state if:
+ // 1. User has approved the plan (userApprovedPlan === true)
+ // 2. There are actual file changes (hasLocalDraft === true)
+ if (mode === AiMode.PLAN) {
+ if (!userApprovedPlan || !hasLocalDraft) {
+ void transitionTo({
+ type: APP_RUNTIME_UPDATED_WITHOUT_EDITS,
  });
- services.clarkProfiler.getProfiler().endFrame(textTrack);
- textSpanActive = false;
- currentStepTextDeltas = [];
- }
- // Record text messages AFTER tool calls and reasoning
- if (step.text) {
- logRef.content += `[ASSISTANT TEXT] ${step.text}\n`;
- try {
- await services.chatSessionStore.recordAssistant({
- type: "text",
- text: step.text,
- ...(apiGroup && { group: apiGroup }),
- });
- }
- catch (error) {
- getLogger().error("Failed to record message from LLM", getErrorMeta(error));
- }
+ return;
  }
- logRef.content += `\n`;
- },
- onFinish: (result) => {
- context.endTurn(result.totalUsage);
- },
- }, clark.tracer, clark.logger);
- for await (const chunk of build.fullStream) {
- await processStreamChunk(chunk, clark, logRef);
- }
- if (firstTokenReceived) {
- services.clarkProfiler.endFrame();
- }
- services.clarkProfiler.endFrame();
- if (thinkingSpanActive) {
- services.clarkProfiler.getProfiler().endFrame("thinking");
- }
- if (textSpanActive) {
- services.clarkProfiler.getProfiler().endFrame("text_generation");
- }
- const endTimestamp = new Date().toISOString();
- logRef.content += `=== LLM CONVERSATION END [${conversationId}] ===\n`;
- logRef.content += `End Timestamp: ${endTimestamp}\n`;
- logRef.content += `Total Steps: ${stepCount}\n`;
- // Log final token usage summary using accumulated totals
- const finalTotalTokens = totalInputTokens + totalOutputTokens;
- logRef.content += `[TOTAL TOKEN USAGE] Input: ${totalInputTokens}, Output: ${totalOutputTokens}, Total: ${finalTotalTokens}`;
- if (totalCachedTokens > 0) {
- logRef.content += `, Cached: ${totalCachedTokens}`;
- }
- logRef.content += `\n`;
- try {
- // Create TokenRequestData object from accumulated step data (not AI SDK usage which is only final step)
- const requestTokenData = {
- requestId: conversationId.toString(),
- inputTokens: totalInputTokens,
- outputTokens: totalOutputTokens,
- totalTokens: totalInputTokens + totalOutputTokens,
- cachedInputTokens: totalCachedTokens,
- model: model.modelId,
- startTime: startTimestamp,
- endTime: endTimestamp,
- };
- await clark.context.peer?.call.aiPushTokenUsage(requestTokenData);
- }
- catch (error) {
- // Token tracking is non-critical - log error but don't fail the AI request
- getLogger().warn("Failed to send token usage data", error instanceof Error ? error.message : String(error));
- }
- // Save the complete log using saveGeneratedArtifact
- try {
- const logArtifact = {
- type: "file",
- filePath: `llm-conversation-${conversationId}.log`,
- content: logRef.content,
- };
- const stepId = `llm-conversation-${conversationId}`;
- await services.appShell.saveGeneratedArtifact(logArtifact, stepId, runTimestamp);
- getLogger().debug("LLM conversation log saved");
- }
- catch (error) {
- getLogger().error("Failed to save LLM conversation log", {
- error: {
- kind: "SaveLogError",
- message: error instanceof Error ? error.message : String(error),
- stack: error instanceof Error ? error.stack : undefined,
- },
- });
- }
- // Check if there are local draft changes
- const hasLocalDraft = await services.draftInterface.hasLocalDraftChanges();
- // In PLAN mode, only proceed to draft state if:
- // 1. User has approved the plan (userApprovedPlan === true)
- // 2. There are actual file changes (hasLocalDraft === true)
- if (mode === AiMode.PLAN) {
- if (!userApprovedPlan || !hasLocalDraft) {
+ // If plan is approved and there are changes, fall through to create draft
+ }
+ if (hasLocalDraft) {
+ void transitionTo({
+ type: V3_AGENT_FINISHED,
+ });
+ }
+ else {
  void transitionTo({
  type: APP_RUNTIME_UPDATED_WITHOUT_EDITS,
  });
- return;
  }
- // If plan is approved and there are changes, fall through to create draft
  }
- if (hasLocalDraft) {
- void transitionTo({
- type: V3_AGENT_FINISHED,
- });
- }
- else {
- void transitionTo({
- type: APP_RUNTIME_UPDATED_WITHOUT_EDITS,
- });
+ finally {
+ // Always release the context lock when done
+ contextHandle.release();
  }
  }
  }
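
Note on the diff above: the one-shot services.contextManager.getContext(...) call is replaced by acquireContext(contextId, ownerId, ...), which returns a handle, and the generation turn now runs inside try/finally so the lock is always released (the new context-handle.js and context-lock.js files in the list above carry the implementation). A minimal TypeScript sketch of that acquire/release pattern follows; the ContextHandle and ContextManager shapes are assumptions inferred from this diff, not the package's documented API:

interface ContextHandle {
  context: unknown; // exclusive view of the conversation context
  release(): void;  // return the lock so other owners can acquire it
}

interface ContextManager {
  acquireContext(
    contextId: string,
    ownerId: string,
    opts: {
      contextOptions?: unknown;
      acquireOptions?: {
        waitTimeoutMs: number;   // how long to wait for a busy context
        lockTimeoutMs: number;   // lease length before the lock expires
        retryIntervalMs: number; // polling interval while waiting
      };
    }
  ): Promise<ContextHandle>;
}

async function withExclusiveContext(manager: ContextManager, contextId: string): Promise<void> {
  // Unique owner id, as in the diff: distinguishes this session's lock
  // from a stale one left behind by a crashed run.
  const ownerId = `llm-generating-${Date.now()}-${Math.random().toString(36).slice(2, 9)}`;
  const handle = await manager.acquireContext(contextId, ownerId, {
    acquireOptions: { waitTimeoutMs: 30000, lockTimeoutMs: 600000, retryIntervalMs: 100 },
  });
  try {
    // ... run the LLM turn against handle.context ...
  } finally {
    handle.release(); // always release, even if the turn throws or aborts
  }
}

The stopWhen change in the same file is related hardening: hasToolCall("build_finalize") becomes hasToolSuccess("build_finalize"), so the stream stops only once the finalize tool reports success rather than as soon as it is invoked, consistent with the +14 -2 change to dist/ai-service/util/stop-condition.js.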