@ai-sdk/devtools 0.0.0-4115c213-20260122152721

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,392 @@
1
+ import {
2
+ type LanguageModelV3FinishReason,
3
+ type LanguageModelV3Usage,
4
+ type LanguageModelV3Middleware,
5
+ type LanguageModelV3StreamPart,
6
+ } from '@ai-sdk/provider';
7
+ import {
8
+ createRun,
9
+ createStep,
10
+ updateStepResult,
11
+ notifyServerAsync,
12
+ } from './db.js';
13
+
14
+ const generateId = () => crypto.randomUUID();
15
+
16
+ // Track active streaming steps for cleanup on process exit
17
+ const activeSteps = new Map<
18
+ string,
19
+ {
20
+ startTime: number;
21
+ collectedOutput: unknown;
22
+ request: unknown;
23
+ fullStreamChunks: unknown[];
24
+ rawChunks: unknown[];
25
+ }
26
+ >();
27
+
28
+ // Handle process termination signals
29
+ let signalHandlersRegistered = false;
30
+ const registerSignalHandlers = () => {
31
+ if (signalHandlersRegistered) return;
32
+ signalHandlersRegistered = true;
33
+
34
+ const cleanup = async () => {
35
+ if (activeSteps.size === 0) return;
36
+
37
+ const promises = Array.from(activeSteps.entries()).map(
38
+ async ([stepId, data]) => {
39
+ const durationMs = Date.now() - data.startTime;
40
+ await updateStepResult(stepId, {
41
+ duration_ms: durationMs,
42
+ output: JSON.stringify(data.collectedOutput),
43
+ usage: null,
44
+ error: 'Request aborted',
45
+ raw_request:
46
+ data.request &&
47
+ typeof data.request === 'object' &&
48
+ 'body' in data.request
49
+ ? JSON.stringify((data.request as { body: unknown }).body)
50
+ : null,
51
+ raw_response: JSON.stringify(data.fullStreamChunks),
52
+ raw_chunks: JSON.stringify(data.rawChunks),
53
+ });
54
+ },
55
+ );
56
+ await Promise.all(promises);
57
+
58
+ // Wait for the server notification to complete before process exits
59
+ await notifyServerAsync('step-update');
60
+ };
61
+
62
+ process.on('SIGINT', () => {
63
+ cleanup().then(() => process.exit(130));
64
+ });
65
+
66
+ process.on('SIGTERM', () => {
67
+ cleanup().then(() => process.exit(143));
68
+ });
69
+ };
70
+
71
+ /**
72
+ * Generate a unique run ID with timestamp prefix for sorting.
73
+ */
74
+ const generateRunId = (): string => {
75
+ const now = new Date();
76
+ const timestamp = now
77
+ .toISOString()
78
+ .replace(/[-:T.Z]/g, '')
79
+ .slice(0, 17);
80
+ const uniqueId = crypto.randomUUID().slice(0, 8);
81
+ return `${timestamp}-${uniqueId}`;
82
+ };
83
+
84
/**
 * Factory function that creates a devtools middleware instance.
 * Each call generates a unique run ID, so all steps within a single
 * streamText/generateText call share the same run.
 *
 * Development-only: throws immediately when NODE_ENV is 'production'.
 *
 * Usage:
 * ```ts
 * const result = streamText({
 *   model: wrapLanguageModel({
 *     middleware: devToolsMiddleware(),
 *     model: yourModel,
 *   }),
 *   prompt: "...",
 * });
 * ```
 */
export const devToolsMiddleware = (): LanguageModelV3Middleware => {
  // Hard guard: logging prompts/outputs to a local store must never run in production.
  if (process.env.NODE_ENV === 'production') {
    throw new Error(
      '@ai-sdk/devtools should not be used in production. ' +
        'Remove devToolsMiddleware from your model configuration for production builds.',
    );
  }

  // Register signal handlers once for cleanup on process exit
  registerSignalHandlers();

  const runId = generateRunId();
  let runCreated = false;
  let stepCounter = 0;

  // The run row is created lazily on the first step, so a middleware
  // instance that is never invoked leaves no record behind.
  const ensureRunCreated = async () => {
    if (!runCreated) {
      await createRun(runId);
      runCreated = true;
    }
  };

  // Monotonic 1-based step counter within this run.
  const getNextStepNumber = () => {
    stepCounter++;
    return stepCounter;
  };

  return {
    specificationVersion: 'v3',

    // Non-streaming path: record the step before the call, then attach
    // either the result or the error to the same step row afterwards.
    wrapGenerate: async ({ doGenerate, params, model }) => {
      const startTime = Date.now();
      const stepId = generateId();
      const stepNumber = getNextStepNumber();
      await ensureRunCreated();

      // Log step start
      await createStep({
        id: stepId,
        run_id: runId,
        step_number: stepNumber,
        type: 'generate',
        model_id: model.modelId,
        // @ts-expect-error broken type
        provider: model.config?.provider,
        started_at: new Date().toISOString(),
        // Snapshot of the call parameters relevant for debugging.
        input: JSON.stringify({
          prompt: params.prompt,
          tools: params.tools,
          toolChoice: params.toolChoice,
          maxOutputTokens: params.maxOutputTokens,
          temperature: params.temperature,
          topP: params.topP,
          topK: params.topK,
          presencePenalty: params.presencePenalty,
          frequencyPenalty: params.frequencyPenalty,
          seed: params.seed,
          responseFormat: params.responseFormat,
        }),
        provider_options: params.providerOptions
          ? JSON.stringify(params.providerOptions)
          : null,
      });

      try {
        const result = await doGenerate();
        const durationMs = Date.now() - startTime;

        await updateStepResult(stepId, {
          duration_ms: durationMs,
          output: JSON.stringify({
            content: result.content,
            finishReason: result.finishReason,
            response: result.response,
          }),
          usage: result.usage ? JSON.stringify(result.usage) : null,
          error: null,
          raw_request: result.request?.body
            ? JSON.stringify(result.request.body)
            : null,
          raw_response: result.response?.body
            ? JSON.stringify(result.response.body)
            : null,
        });

        return result;
      } catch (error) {
        // Record the failure on the step row, then rethrow so the
        // caller's own error handling still runs.
        const durationMs = Date.now() - startTime;
        await updateStepResult(stepId, {
          duration_ms: durationMs,
          output: null,
          usage: null,
          error: error instanceof Error ? error.message : String(error),
          raw_request: null,
          raw_response: null,
        });
        throw error;
      }
    },

    // Streaming path: tee the stream through a TransformStream that
    // collects text/reasoning/tool-call parts and finalizes the step row
    // on flush (normal end), cancel (abort), or thrown error.
    wrapStream: async ({ doStream, params, model }) => {
      const startTime = Date.now();
      const stepId = generateId();
      const stepNumber = getNextStepNumber();
      await ensureRunCreated();

      // Store original setting before overriding.
      // NOTE: mutates params so the provider emits 'raw' chunks for capture;
      // raw chunks the user did not ask for are filtered out again below.
      const userRequestedRawChunks = params.includeRawChunks === true;
      params.includeRawChunks = true;

      // Log step start
      await createStep({
        id: stepId,
        run_id: runId,
        step_number: stepNumber,
        type: 'stream',
        model_id: model.modelId,
        // @ts-expect-error broken type
        provider: model.config?.provider,
        started_at: new Date().toISOString(),
        input: JSON.stringify({
          prompt: params.prompt,
          tools: params.tools,
          toolChoice: params.toolChoice,
          maxOutputTokens: params.maxOutputTokens,
          temperature: params.temperature,
          topP: params.topP,
          topK: params.topK,
          presencePenalty: params.presencePenalty,
          frequencyPenalty: params.frequencyPenalty,
          seed: params.seed,
          responseFormat: params.responseFormat,
        }),
        provider_options: params.providerOptions
          ? JSON.stringify(params.providerOptions)
          : null,
      });

      try {
        const { stream, request, response, ...rest } = await doStream();

        // Collect stream output for logging
        const collectedOutput: {
          textParts: Array<{ id: string; text: string }>;
          reasoningParts: Array<{ id: string; text: string }>;
          toolCalls: LanguageModelV3StreamPart[];
          finishReason?: LanguageModelV3FinishReason;
          usage?: LanguageModelV3Usage;
        } = {
          textParts: [],
          reasoningParts: [],
          toolCalls: [],
        };

        // In-progress text/reasoning buffers, keyed by part id, until the
        // matching *-end chunk arrives.
        const currentText: Map<string, string> = new Map();
        const currentReasoning: Map<string, string> = new Map();
        const fullStreamChunks: LanguageModelV3StreamPart[] = [];
        const rawChunks: unknown[] = [];

        // Track this step for cleanup on process exit. The collections are
        // shared by reference, so the signal handlers see partial progress.
        activeSteps.set(stepId, {
          startTime,
          collectedOutput,
          request,
          fullStreamChunks,
          rawChunks,
        });

        const transformStream = new TransformStream<
          LanguageModelV3StreamPart,
          LanguageModelV3StreamPart
        >({
          transform(chunk, controller) {
            // Separate raw provider chunks from other stream chunks
            if (chunk.type === 'raw') {
              // Store just the unwrapped rawValue for cleaner data
              rawChunks.push(chunk.rawValue);
              // Only pass raw chunks through if user originally requested them
              if (userRequestedRawChunks) {
                controller.enqueue(chunk);
              }
              return;
            }

            // Collect all non-raw chunks for full stream logging
            fullStreamChunks.push(chunk);

            // Collect relevant data from stream
            switch (chunk.type) {
              case 'text-start':
                currentText.set(chunk.id, '');
                break;
              case 'text-delta':
                currentText.set(
                  chunk.id,
                  (currentText.get(chunk.id) ?? '') + chunk.delta,
                );
                break;
              case 'text-end':
                collectedOutput.textParts.push({
                  id: chunk.id,
                  text: currentText.get(chunk.id) ?? '',
                });
                break;
              case 'reasoning-start':
                currentReasoning.set(chunk.id, '');
                break;
              case 'reasoning-delta':
                currentReasoning.set(
                  chunk.id,
                  (currentReasoning.get(chunk.id) ?? '') + chunk.delta,
                );
                break;
              case 'reasoning-end':
                collectedOutput.reasoningParts.push({
                  id: chunk.id,
                  text: currentReasoning.get(chunk.id) ?? '',
                });
                break;
              case 'tool-call':
                collectedOutput.toolCalls.push(chunk);
                break;
              case 'finish':
                collectedOutput.finishReason = chunk.finishReason;
                collectedOutput.usage = chunk.usage;
                break;
            }

            // Everything except filtered raw chunks is forwarded unchanged.
            controller.enqueue(chunk);
          },

          async flush() {
            // Remove from active tracking - stream completed normally
            activeSteps.delete(stepId);

            const durationMs = Date.now() - startTime;
            await updateStepResult(stepId, {
              duration_ms: durationMs,
              output: JSON.stringify(collectedOutput),
              usage: collectedOutput.usage
                ? JSON.stringify(collectedOutput.usage)
                : null,
              error: null,
              raw_request: request?.body ? JSON.stringify(request.body) : null,
              raw_response: JSON.stringify(fullStreamChunks),
              raw_chunks: JSON.stringify(rawChunks),
            });
          },

          // @ts-expect-error - cancel is valid per WHATWG Streams spec but missing from TS types
          async cancel() {
            // Remove from active tracking - stream was cancelled
            activeSteps.delete(stepId);

            // Same finalization as flush(), but the partial output is kept
            // and the step is marked as aborted.
            const durationMs = Date.now() - startTime;
            await updateStepResult(stepId, {
              duration_ms: durationMs,
              output: JSON.stringify(collectedOutput),
              usage: collectedOutput.usage
                ? JSON.stringify(collectedOutput.usage)
                : null,
              error: 'Request aborted',
              raw_request: request?.body ? JSON.stringify(request.body) : null,
              raw_response: JSON.stringify(fullStreamChunks),
              raw_chunks: JSON.stringify(rawChunks),
            });
          },
        });

        return {
          stream: stream.pipeThrough(transformStream),
          request,
          response,
          ...rest,
        };
      } catch (error) {
        // doStream() itself failed — no stream was ever produced.
        activeSteps.delete(stepId);

        const durationMs = Date.now() - startTime;
        await updateStepResult(stepId, {
          duration_ms: durationMs,
          output: null,
          usage: null,
          error: error instanceof Error ? error.message : String(error),
          raw_request: null,
          raw_response: null,
          raw_chunks: null,
        });
        throw error;
      }
    },
  };
};