ai 5.0.0-beta.20 → 5.0.0-beta.22
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +24 -0
- package/dist/bin/ai.js +292 -268
- package/dist/bin/ai.js.map +1 -1
- package/dist/bin/ai.min.js +35 -49
- package/dist/index.d.mts +57 -17
- package/dist/index.d.ts +57 -17
- package/dist/index.js +189 -122
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +187 -121
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs.map +1 -1
- package/dist/test/index.d.mts +17 -2
- package/dist/test/index.d.ts +17 -2
- package/dist/test/index.js +51 -0
- package/dist/test/index.js.map +1 -1
- package/dist/test/index.mjs +52 -0
- package/dist/test/index.mjs.map +1 -1
- package/package.json +5 -5
package/dist/bin/ai.js
CHANGED
@@ -24,6 +24,7 @@ __export(ai_exports, {
   formatAttachedFiles: () => formatAttachedFiles,
   getMediaType: () => getMediaType,
   isStdinAvailable: () => isStdinAvailable,
+  main: () => main,
   parseArgs: () => parseArgs,
   readFileContent: () => readFileContent,
   resolveModel: () => resolveModel,
@@ -192,125 +193,152 @@ function getResponseUIMessageId({
 var import_provider_utils2 = require("@ai-sdk/provider-utils");

 // src/ui-message-stream/ui-message-chunks.ts
+var import_v43 = require("zod/v4");
+
+// src/types/provider-metadata.ts
+var import_v42 = require("zod/v4");
+
+// src/types/json-value.ts
 var import_v4 = require("zod/v4");
-var
-import_v4.z.
-
-
+var jsonValueSchema = import_v4.z.lazy(
+  () => import_v4.z.union([
+    import_v4.z.null(),
+    import_v4.z.string(),
+    import_v4.z.number(),
+    import_v4.z.boolean(),
+    import_v4.z.record(import_v4.z.string(), jsonValueSchema),
+    import_v4.z.array(jsonValueSchema)
+  ])
+);
+
+// src/types/provider-metadata.ts
+var providerMetadataSchema = import_v42.z.record(
+  import_v42.z.string(),
+  import_v42.z.record(import_v42.z.string(), jsonValueSchema)
+);
+
+// src/ui-message-stream/ui-message-chunks.ts
+var uiMessageChunkSchema = import_v43.z.union([
+  import_v43.z.strictObject({
+    type: import_v43.z.literal("text-start"),
+    id: import_v43.z.string(),
+    providerMetadata: providerMetadataSchema.optional()
   }),
-
-    type:
-    id:
-    delta:
+  import_v43.z.strictObject({
+    type: import_v43.z.literal("text-delta"),
+    id: import_v43.z.string(),
+    delta: import_v43.z.string(),
+    providerMetadata: providerMetadataSchema.optional()
   }),
-
-    type:
-    id:
+  import_v43.z.strictObject({
+    type: import_v43.z.literal("text-end"),
+    id: import_v43.z.string(),
+    providerMetadata: providerMetadataSchema.optional()
   }),
-
-    type:
-    errorText:
+  import_v43.z.strictObject({
+    type: import_v43.z.literal("error"),
+    errorText: import_v43.z.string()
   }),
-
-    type:
-    toolCallId:
-    toolName:
-    providerExecuted:
+  import_v43.z.strictObject({
+    type: import_v43.z.literal("tool-input-start"),
+    toolCallId: import_v43.z.string(),
+    toolName: import_v43.z.string(),
+    providerExecuted: import_v43.z.boolean().optional()
   }),
-
-    type:
-    toolCallId:
-    inputTextDelta:
+  import_v43.z.strictObject({
+    type: import_v43.z.literal("tool-input-delta"),
+    toolCallId: import_v43.z.string(),
+    inputTextDelta: import_v43.z.string()
   }),
-
-    type:
-    toolCallId:
-    toolName:
-    input:
-    providerExecuted:
+  import_v43.z.strictObject({
+    type: import_v43.z.literal("tool-input-available"),
+    toolCallId: import_v43.z.string(),
+    toolName: import_v43.z.string(),
+    input: import_v43.z.unknown(),
+    providerExecuted: import_v43.z.boolean().optional(),
+    providerMetadata: providerMetadataSchema.optional()
   }),
-
-    type:
-    toolCallId:
-    output:
-    providerExecuted:
+  import_v43.z.strictObject({
+    type: import_v43.z.literal("tool-output-available"),
+    toolCallId: import_v43.z.string(),
+    output: import_v43.z.unknown(),
+    providerExecuted: import_v43.z.boolean().optional()
   }),
-
-    type:
-    toolCallId:
-    errorText:
-    providerExecuted:
+  import_v43.z.strictObject({
+    type: import_v43.z.literal("tool-output-error"),
+    toolCallId: import_v43.z.string(),
+    errorText: import_v43.z.string(),
+    providerExecuted: import_v43.z.boolean().optional()
   }),
-
-    type:
-    text:
-    providerMetadata:
+  import_v43.z.strictObject({
+    type: import_v43.z.literal("reasoning"),
+    text: import_v43.z.string(),
+    providerMetadata: providerMetadataSchema.optional()
   }),
-
-    type:
-    id:
-    providerMetadata:
+  import_v43.z.strictObject({
+    type: import_v43.z.literal("reasoning-start"),
+    id: import_v43.z.string(),
+    providerMetadata: providerMetadataSchema.optional()
   }),
-
-    type:
-    id:
-    delta:
-    providerMetadata:
+  import_v43.z.strictObject({
+    type: import_v43.z.literal("reasoning-delta"),
+    id: import_v43.z.string(),
+    delta: import_v43.z.string(),
+    providerMetadata: providerMetadataSchema.optional()
   }),
-
-    type:
-    id:
-    providerMetadata:
+  import_v43.z.strictObject({
+    type: import_v43.z.literal("reasoning-end"),
+    id: import_v43.z.string(),
+    providerMetadata: providerMetadataSchema.optional()
   }),
-
-    type:
+  import_v43.z.strictObject({
+    type: import_v43.z.literal("reasoning-part-finish")
   }),
-
-    type:
-    sourceId:
-    url:
-    title:
-    providerMetadata:
-    // Use z.any() for generic metadata
+  import_v43.z.strictObject({
+    type: import_v43.z.literal("source-url"),
+    sourceId: import_v43.z.string(),
+    url: import_v43.z.string(),
+    title: import_v43.z.string().optional(),
+    providerMetadata: providerMetadataSchema.optional()
   }),
-
-    type:
-    sourceId:
-    mediaType:
-    title:
-    filename:
-    providerMetadata:
-    // Use z.any() for generic metadata
+  import_v43.z.strictObject({
+    type: import_v43.z.literal("source-document"),
+    sourceId: import_v43.z.string(),
+    mediaType: import_v43.z.string(),
+    title: import_v43.z.string(),
+    filename: import_v43.z.string().optional(),
+    providerMetadata: providerMetadataSchema.optional()
   }),
-
-    type:
-    url:
-    mediaType:
+  import_v43.z.strictObject({
+    type: import_v43.z.literal("file"),
+    url: import_v43.z.string(),
+    mediaType: import_v43.z.string(),
+    providerMetadata: providerMetadataSchema.optional()
   }),
-
-    type:
-    id:
-    data:
-    transient:
+  import_v43.z.strictObject({
+    type: import_v43.z.string().startsWith("data-"),
+    id: import_v43.z.string().optional(),
+    data: import_v43.z.unknown(),
+    transient: import_v43.z.boolean().optional()
   }),
-
-    type:
+  import_v43.z.strictObject({
+    type: import_v43.z.literal("start-step")
   }),
-
-    type:
+  import_v43.z.strictObject({
+    type: import_v43.z.literal("finish-step")
   }),
-
-    type:
-    messageId:
-    messageMetadata:
+  import_v43.z.strictObject({
+    type: import_v43.z.literal("start"),
+    messageId: import_v43.z.string().optional(),
+    messageMetadata: import_v43.z.unknown().optional()
   }),
-
-    type:
-    messageMetadata:
+  import_v43.z.strictObject({
+    type: import_v43.z.literal("finish"),
+    messageMetadata: import_v43.z.unknown().optional()
   }),
-
-    type:
-    messageMetadata:
+  import_v43.z.strictObject({
+    type: import_v43.z.literal("message-metadata"),
+    messageMetadata: import_v43.z.unknown()
   })
 ]);
 function isDataUIMessageChunk(chunk) {
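The new `uiMessageChunkSchema` replaces the per-chunk inline definitions with a shared `providerMetadataSchema` and adds an optional `providerMetadata` field to most chunk variants. A minimal standalone sketch of the same shape (using zod v4 directly; this is not an export of the `ai` package, and the sample chunk values are invented):

```js
// Illustrative sketch only: mirrors the "text-start" variant added above.
const { z } = require("zod/v4");

const jsonValueSchema = z.lazy(() =>
  z.union([
    z.null(),
    z.string(),
    z.number(),
    z.boolean(),
    z.record(z.string(), jsonValueSchema),
    z.array(jsonValueSchema)
  ])
);
const providerMetadataSchema = z.record(
  z.string(),
  z.record(z.string(), jsonValueSchema)
);

const textStartChunkSchema = z.strictObject({
  type: z.literal("text-start"),
  id: z.string(),
  providerMetadata: providerMetadataSchema.optional()
});

// A streamed chunk that carries provider metadata now validates:
console.log(
  textStartChunkSchema.safeParse({
    type: "text-start",
    id: "txt_1",
    providerMetadata: { openai: { itemId: "msg_123" } }
  }).success
); // true
```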
@@ -723,22 +751,25 @@ function processUIMessageStream({
 }) {
   return stream.pipeThrough(
     new TransformStream({
-      async transform(
+      async transform(chunk, controller) {
         await runUpdateMessageJob(async ({ state, write }) => {
-          var _a9, _b;
+          var _a9, _b, _c, _d;
           function updateToolInvocationPart(options) {
             var _a10;
-            const
-            (
+            const part = state.message.parts.find(
+              (part2) => isToolUIPart(part2) && part2.toolCallId === options.toolCallId
             );
             const anyOptions = options;
-            const anyPart =
-            if (
-
+            const anyPart = part;
+            if (part != null) {
+              part.state = options.state;
               anyPart.input = anyOptions.input;
               anyPart.output = anyOptions.output;
               anyPart.errorText = anyOptions.errorText;
-              anyPart.providerExecuted = (_a10 = anyOptions.providerExecuted) != null ? _a10 :
+              anyPart.providerExecuted = (_a10 = anyOptions.providerExecuted) != null ? _a10 : part.providerExecuted;
+              if (anyOptions.providerMetadata != null && part.state === "input-available") {
+                part.callProviderMetadata = anyOptions.providerMetadata;
+              }
             } else {
               state.message.parts.push({
                 type: `tool-${options.toolName}`,
@@ -747,7 +778,8 @@ function processUIMessageStream({
                 input: anyOptions.input,
                 output: anyOptions.output,
                 errorText: anyOptions.errorText,
-                providerExecuted: anyOptions.providerExecuted
+                providerExecuted: anyOptions.providerExecuted,
+                ...anyOptions.providerMetadata != null ? { callProviderMetadata: anyOptions.providerMetadata } : {}
               });
             }
           }
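With this change, `processUIMessageStream` copies the `providerMetadata` carried by a tool chunk onto the tool UI part as `callProviderMetadata` once the call reaches the `input-available` state. A hypothetical resulting part, shown only to illustrate the new field (all values invented):

```js
// Hypothetical tool UI part after a "tool-input-available" chunk with providerMetadata.
const toolPart = {
  type: "tool-getWeather", // `tool-${toolName}`
  toolCallId: "call_123",
  state: "input-available",
  input: { city: "Berlin" },
  providerExecuted: false,
  callProviderMetadata: { anthropic: { signature: "..." } }
};
```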
@@ -763,27 +795,31 @@ function processUIMessageStream({
               state.message.metadata = mergedMetadata;
             }
           }
-          switch (
+          switch (chunk.type) {
             case "text-start": {
               const textPart = {
                 type: "text",
                 text: "",
+                providerMetadata: chunk.providerMetadata,
                 state: "streaming"
               };
-              state.activeTextParts[
+              state.activeTextParts[chunk.id] = textPart;
               state.message.parts.push(textPart);
               write();
               break;
             }
             case "text-delta": {
-              state.activeTextParts[
+              const textPart = state.activeTextParts[chunk.id];
+              textPart.text += chunk.delta;
+              textPart.providerMetadata = (_a9 = chunk.providerMetadata) != null ? _a9 : textPart.providerMetadata;
               write();
               break;
             }
             case "text-end": {
-              const textPart = state.activeTextParts[
+              const textPart = state.activeTextParts[chunk.id];
               textPart.state = "done";
-
+              textPart.providerMetadata = (_b = chunk.providerMetadata) != null ? _b : textPart.providerMetadata;
+              delete state.activeTextParts[chunk.id];
               write();
               break;
             }
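Text parts now retain provider metadata across the text lifecycle: `text-start` sets it, and `text-delta` / `text-end` overwrite it only when the incoming chunk actually carries a value. A minimal sketch of that merge rule (values invented):

```js
// "Keep the previous value unless the chunk brings a new one."
function mergeProviderMetadata(current, incoming) {
  return incoming != null ? incoming : current;
}

let metadata;
metadata = mergeProviderMetadata(metadata, { openai: { itemId: "msg_1" } }); // text-start
metadata = mergeProviderMetadata(metadata, undefined);                       // text-delta without metadata keeps it
metadata = mergeProviderMetadata(metadata, { openai: { itemId: "msg_2" } }); // text-end overrides
console.log(metadata); // { openai: { itemId: "msg_2" } }
```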
@@ -791,34 +827,34 @@ function processUIMessageStream({
               const reasoningPart = {
                 type: "reasoning",
                 text: "",
-                providerMetadata:
+                providerMetadata: chunk.providerMetadata,
                 state: "streaming"
               };
-              state.activeReasoningParts[
+              state.activeReasoningParts[chunk.id] = reasoningPart;
               state.message.parts.push(reasoningPart);
               write();
               break;
             }
             case "reasoning-delta": {
-              const reasoningPart = state.activeReasoningParts[
-              reasoningPart.text +=
-              reasoningPart.providerMetadata = (
+              const reasoningPart = state.activeReasoningParts[chunk.id];
+              reasoningPart.text += chunk.delta;
+              reasoningPart.providerMetadata = (_c = chunk.providerMetadata) != null ? _c : reasoningPart.providerMetadata;
               write();
               break;
             }
             case "reasoning-end": {
-              const reasoningPart = state.activeReasoningParts[
-              reasoningPart.providerMetadata = (
+              const reasoningPart = state.activeReasoningParts[chunk.id];
+              reasoningPart.providerMetadata = (_d = chunk.providerMetadata) != null ? _d : reasoningPart.providerMetadata;
               reasoningPart.state = "done";
-              delete state.activeReasoningParts[
+              delete state.activeReasoningParts[chunk.id];
               write();
               break;
             }
             case "file": {
               state.message.parts.push({
                 type: "file",
-                mediaType:
-                url:
+                mediaType: chunk.mediaType,
+                url: chunk.url
               });
               write();
               break;
@@ -826,10 +862,10 @@ function processUIMessageStream({
             case "source-url": {
               state.message.parts.push({
                 type: "source-url",
-                sourceId:
-                url:
-                title:
-                providerMetadata:
+                sourceId: chunk.sourceId,
+                url: chunk.url,
+                title: chunk.title,
+                providerMetadata: chunk.providerMetadata
               });
               write();
               break;
@@ -837,40 +873,40 @@ function processUIMessageStream({
             case "source-document": {
               state.message.parts.push({
                 type: "source-document",
-                sourceId:
-                mediaType:
-                title:
-                filename:
-                providerMetadata:
+                sourceId: chunk.sourceId,
+                mediaType: chunk.mediaType,
+                title: chunk.title,
+                filename: chunk.filename,
+                providerMetadata: chunk.providerMetadata
               });
               write();
               break;
             }
             case "tool-input-start": {
               const toolInvocations = state.message.parts.filter(isToolUIPart);
-              state.partialToolCalls[
+              state.partialToolCalls[chunk.toolCallId] = {
                 text: "",
-                toolName:
+                toolName: chunk.toolName,
                 index: toolInvocations.length
               };
               updateToolInvocationPart({
-                toolCallId:
-                toolName:
+                toolCallId: chunk.toolCallId,
+                toolName: chunk.toolName,
                 state: "input-streaming",
                 input: void 0,
-                providerExecuted:
+                providerExecuted: chunk.providerExecuted
               });
               write();
               break;
             }
             case "tool-input-delta": {
-              const partialToolCall = state.partialToolCalls[
-              partialToolCall.text +=
+              const partialToolCall = state.partialToolCalls[chunk.toolCallId];
+              partialToolCall.text += chunk.inputTextDelta;
               const { value: partialArgs } = await parsePartialJson(
                 partialToolCall.text
               );
               updateToolInvocationPart({
-                toolCallId:
+                toolCallId: chunk.toolCallId,
                 toolName: partialToolCall.toolName,
                 state: "input-streaming",
                 input: partialArgs
@@ -880,23 +916,24 @@ function processUIMessageStream({
             }
             case "tool-input-available": {
               updateToolInvocationPart({
-                toolCallId:
-                toolName:
+                toolCallId: chunk.toolCallId,
+                toolName: chunk.toolName,
                 state: "input-available",
-                input:
-                providerExecuted:
+                input: chunk.input,
+                providerExecuted: chunk.providerExecuted,
+                providerMetadata: chunk.providerMetadata
               });
               write();
-              if (onToolCall && !
+              if (onToolCall && !chunk.providerExecuted) {
                 const result = await onToolCall({
-                  toolCall:
+                  toolCall: chunk
                 });
                 if (result != null) {
                   updateToolInvocationPart({
-                    toolCallId:
-                    toolName:
+                    toolCallId: chunk.toolCallId,
+                    toolName: chunk.toolName,
                     state: "output-available",
-                    input:
+                    input: chunk.input,
                     output: result
                   });
                   write();
@@ -910,7 +947,7 @@ function processUIMessageStream({
                 throw new Error("tool_result must be preceded by a tool_call");
               }
               const toolInvocationIndex = toolInvocations.findIndex(
-                (invocation) => invocation.toolCallId ===
+                (invocation) => invocation.toolCallId === chunk.toolCallId
               );
               if (toolInvocationIndex === -1) {
                 throw new Error(
@@ -921,12 +958,12 @@ function processUIMessageStream({
                 toolInvocations[toolInvocationIndex]
               );
               updateToolInvocationPart({
-                toolCallId:
+                toolCallId: chunk.toolCallId,
                 toolName,
                 state: "output-available",
                 input: toolInvocations[toolInvocationIndex].input,
-                output:
-                providerExecuted:
+                output: chunk.output,
+                providerExecuted: chunk.providerExecuted
               });
               write();
               break;
@@ -937,7 +974,7 @@ function processUIMessageStream({
                 throw new Error("tool_result must be preceded by a tool_call");
               }
               const toolInvocationIndex = toolInvocations.findIndex(
-                (invocation) => invocation.toolCallId ===
+                (invocation) => invocation.toolCallId === chunk.toolCallId
               );
               if (toolInvocationIndex === -1) {
                 throw new Error(
@@ -948,12 +985,12 @@ function processUIMessageStream({
                 toolInvocations[toolInvocationIndex]
               );
               updateToolInvocationPart({
-                toolCallId:
+                toolCallId: chunk.toolCallId,
                 toolName,
                 state: "output-error",
                 input: toolInvocations[toolInvocationIndex].input,
-                errorText:
-                providerExecuted:
+                errorText: chunk.errorText,
+                providerExecuted: chunk.providerExecuted
               });
               write();
               break;
@@ -968,62 +1005,65 @@ function processUIMessageStream({
               break;
             }
             case "start": {
-              if (
-              state.message.id =
+              if (chunk.messageId != null) {
+                state.message.id = chunk.messageId;
               }
-              await updateMessageMetadata(
-              if (
+              await updateMessageMetadata(chunk.messageMetadata);
+              if (chunk.messageId != null || chunk.messageMetadata != null) {
                 write();
               }
               break;
             }
             case "finish": {
-              await updateMessageMetadata(
-              if (
+              await updateMessageMetadata(chunk.messageMetadata);
+              if (chunk.messageMetadata != null) {
                 write();
               }
               break;
             }
             case "message-metadata": {
-              await updateMessageMetadata(
-              if (
+              await updateMessageMetadata(chunk.messageMetadata);
+              if (chunk.messageMetadata != null) {
                 write();
               }
               break;
             }
             case "error": {
-              onError == null ? void 0 : onError(new Error(
+              onError == null ? void 0 : onError(new Error(chunk.errorText));
               break;
             }
             default: {
-              if (isDataUIMessageChunk(
-
-
-
+              if (isDataUIMessageChunk(chunk)) {
+                if ((dataPartSchemas == null ? void 0 : dataPartSchemas[chunk.type]) != null) {
+                  await (0, import_provider_utils2.validateTypes)({
+                    value: chunk.data,
+                    schema: dataPartSchemas[chunk.type]
+                  });
+                }
+                const dataChunk = chunk;
+                if (dataChunk.transient) {
+                  onData == null ? void 0 : onData(dataChunk);
                   break;
                 }
-                const
-                (
+                const existingUIPart = dataChunk.id != null ? state.message.parts.find(
+                  (chunkArg) => dataChunk.type === chunkArg.type && dataChunk.id === chunkArg.id
                 ) : void 0;
-                if (
-
+                if (existingUIPart != null) {
+                  existingUIPart.data = dataChunk.data;
                 } else {
-                  state.message.parts.push(
+                  state.message.parts.push(dataChunk);
                 }
-                onData == null ? void 0 : onData(
+                onData == null ? void 0 : onData(dataChunk);
                 write();
               }
             }
           }
-          controller.enqueue(
+          controller.enqueue(chunk);
         });
       }
     })
   );
 }
-function isObject(value) {
-  return typeof value === "object" && value !== null;
-}

 // src/ui-message-stream/handle-ui-message-stream-finish.ts
 function handleUIMessageStreamFinish({
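The `default` branch now validates custom `data-*` chunks against a caller-supplied schema (the `dataPartSchemas` lookup used above) before applying them to the message. A hedged sketch of what such a schema map could look like, using zod directly (the `data-weather` type and payload are invented for the example):

```js
// Assumed example: a schema map keyed by `data-*` chunk type, validating chunk.data.
const { z } = require("zod/v4");

const dataPartSchemas = {
  "data-weather": z.object({ city: z.string(), temperature: z.number() })
};

const chunk = { type: "data-weather", id: "w1", data: { city: "Berlin", temperature: 21 } };
const schema = dataPartSchemas[chunk.type];
if (schema != null) {
  schema.parse(chunk.data); // throws if the payload does not match
}
```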
@@ -1615,7 +1655,7 @@ async function download({ url }) {
 // src/prompt/data-content.ts
 var import_provider6 = require("@ai-sdk/provider");
 var import_provider_utils5 = require("@ai-sdk/provider-utils");
-var
+var import_v44 = require("zod/v4");

 // src/prompt/split-data-url.ts
 function splitDataUrl(dataUrl) {
@@ -1634,11 +1674,11 @@ function splitDataUrl(dataUrl) {
 }

 // src/prompt/data-content.ts
-var dataContentSchema =
-
-
-
-
+var dataContentSchema = import_v44.z.union([
+  import_v44.z.string(),
+  import_v44.z.instanceof(Uint8Array),
+  import_v44.z.instanceof(ArrayBuffer),
+  import_v44.z.custom(
     // Buffer might not be available in some environments such as CloudFlare:
     (value) => {
       var _a9, _b;
@@ -2186,28 +2226,6 @@ var import_v47 = require("zod/v4");
 // src/prompt/message.ts
 var import_v46 = require("zod/v4");

-// src/types/provider-metadata.ts
-var import_v44 = require("zod/v4");
-
-// src/types/json-value.ts
-var import_v43 = require("zod/v4");
-var jsonValueSchema = import_v43.z.lazy(
-  () => import_v43.z.union([
-    import_v43.z.null(),
-    import_v43.z.string(),
-    import_v43.z.number(),
-    import_v43.z.boolean(),
-    import_v43.z.record(import_v43.z.string(), jsonValueSchema),
-    import_v43.z.array(jsonValueSchema)
-  ])
-);
-
-// src/types/provider-metadata.ts
-var providerMetadataSchema = import_v44.z.record(
-  import_v44.z.string(),
-  import_v44.z.record(import_v44.z.string(), jsonValueSchema)
-);
-
 // src/prompt/content-part.ts
 var import_v45 = require("zod/v4");
 var textPartSchema = import_v45.z.object({
@@ -3038,7 +3056,11 @@ function toResponseMessages({
   ).filter((part) => part.type !== "text" || part.text.length > 0).map((part) => {
     switch (part.type) {
       case "text":
-        return
+        return {
+          type: "text",
+          text: part.text,
+          providerOptions: part.providerMetadata
+        };
       case "reasoning":
         return {
           type: "reasoning",
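`toResponseMessages` now carries the provider metadata of generated text forward on the response content as `providerOptions`, presumably so it can be passed back to the provider on a follow-up call. A small sketch of the mapping (input values invented):

```js
// Assumed input part; the mapping mirrors the "text" case above.
const part = { type: "text", text: "Hello!", providerMetadata: { openai: { itemId: "msg_123" } } };

const responseContentPart = {
  type: "text",
  text: part.text,
  providerOptions: part.providerMetadata
};
```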
@@ -3293,7 +3315,7 @@ var DefaultStreamTextResult = class {
     let activeReasoningContent = {};
     const eventProcessor = new TransformStream({
       async transform(chunk, controller) {
-        var _a9, _b;
+        var _a9, _b, _c;
         controller.enqueue(chunk);
         const { part } = chunk;
         if (part.type === "text" || part.type === "reasoning" || part.type === "source" || part.type === "tool-call" || part.type === "tool-result" || part.type === "tool-input-start" || part.type === "tool-input-delta" || part.type === "raw") {
@@ -3323,7 +3345,7 @@ var DefaultStreamTextResult = class {
             return;
           }
           activeText.text += part.text;
-          activeText.providerMetadata = part.providerMetadata;
+          activeText.providerMetadata = (_a9 = part.providerMetadata) != null ? _a9 : activeText.providerMetadata;
         }
         if (part.type === "text-end") {
           delete activeTextContent[part.id];
@@ -3349,7 +3371,7 @@ var DefaultStreamTextResult = class {
             return;
           }
           activeReasoning.text += part.text;
-          activeReasoning.providerMetadata = (
+          activeReasoning.providerMetadata = (_b = part.providerMetadata) != null ? _b : activeReasoning.providerMetadata;
         }
         if (part.type === "reasoning-end") {
           const activeReasoning = activeReasoningContent[part.id];
@@ -3363,7 +3385,7 @@ var DefaultStreamTextResult = class {
             });
             return;
           }
-          activeReasoning.providerMetadata = (
+          activeReasoning.providerMetadata = (_c = part.providerMetadata) != null ? _c : activeReasoning.providerMetadata;
           delete activeReasoningContent[part.id];
         }
         if (part.type === "file") {
@@ -4052,10 +4074,10 @@ var DefaultStreamTextResult = class {
     sendFinish = true,
     onError = import_provider16.getErrorMessage
   } = {}) {
-    const responseMessageId = getResponseUIMessageId({
+    const responseMessageId = generateMessageId != null ? getResponseUIMessageId({
       originalMessages,
-      responseMessageId:
-    });
+      responseMessageId: generateMessageId
+    }) : void 0;
     const baseStream = this.fullStream.pipeThrough(
       new TransformStream({
         transform: async (part, controller) => {
@@ -4063,26 +4085,35 @@ var DefaultStreamTextResult = class {
           const partType = part.type;
           switch (partType) {
             case "text-start": {
-              controller.enqueue({
+              controller.enqueue({
+                type: "text-start",
+                id: part.id,
+                ...part.providerMetadata != null ? { providerMetadata: part.providerMetadata } : {}
+              });
               break;
             }
             case "text": {
               controller.enqueue({
                 type: "text-delta",
                 id: part.id,
-                delta: part.text
+                delta: part.text,
+                ...part.providerMetadata != null ? { providerMetadata: part.providerMetadata } : {}
               });
               break;
             }
             case "text-end": {
-              controller.enqueue({
+              controller.enqueue({
+                type: "text-end",
+                id: part.id,
+                ...part.providerMetadata != null ? { providerMetadata: part.providerMetadata } : {}
+              });
               break;
             }
             case "reasoning-start": {
               controller.enqueue({
                 type: "reasoning-start",
                 id: part.id,
-                providerMetadata: part.providerMetadata
+                ...part.providerMetadata != null ? { providerMetadata: part.providerMetadata } : {}
               });
               break;
             }
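The UI message stream produced from a `streamText` result now includes `providerMetadata` on its chunks only when a value is present, using a conditional spread instead of always writing the key. A tiny sketch of the idiom:

```js
// The conditional-spread idiom used above: the key is added only when a value exists.
const providerMetadata = undefined;

const chunk = {
  type: "text-start",
  id: "txt_1",
  ...(providerMetadata != null ? { providerMetadata } : {})
};

console.log("providerMetadata" in chunk); // false — the key is omitted entirely
```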
@@ -4092,7 +4123,7 @@ var DefaultStreamTextResult = class {
                   type: "reasoning-delta",
                   id: part.id,
                   delta: part.text,
-                  providerMetadata: part.providerMetadata
+                  ...part.providerMetadata != null ? { providerMetadata: part.providerMetadata } : {}
                 });
               }
               break;
@@ -4101,7 +4132,7 @@ var DefaultStreamTextResult = class {
               controller.enqueue({
                 type: "reasoning-end",
                 id: part.id,
-                providerMetadata: part.providerMetadata
+                ...part.providerMetadata != null ? { providerMetadata: part.providerMetadata } : {}
               });
               break;
             }
@@ -4120,7 +4151,7 @@ var DefaultStreamTextResult = class {
                   sourceId: part.id,
                   url: part.url,
                   title: part.title,
-                  providerMetadata: part.providerMetadata
+                  ...part.providerMetadata != null ? { providerMetadata: part.providerMetadata } : {}
                 });
               }
               if (sendSources && part.sourceType === "document") {
@@ -4130,7 +4161,7 @@ var DefaultStreamTextResult = class {
                   mediaType: part.mediaType,
                   title: part.title,
                   filename: part.filename,
-                  providerMetadata: part.providerMetadata
+                  ...part.providerMetadata != null ? { providerMetadata: part.providerMetadata } : {}
                 });
               }
               break;
@@ -4158,7 +4189,8 @@ var DefaultStreamTextResult = class {
                 toolCallId: part.toolCallId,
                 toolName: part.toolName,
                 input: part.input,
-                providerExecuted: part.providerExecuted
+                providerExecuted: part.providerExecuted,
+                providerMetadata: part.providerMetadata
               });
               break;
             }
@@ -4199,8 +4231,8 @@ var DefaultStreamTextResult = class {
               if (sendStart) {
                 controller.enqueue({
                   type: "start",
-
-
+                  ...messageMetadataValue != null ? { messageMetadata: messageMetadataValue } : {},
+                  ...responseMessageId != null ? { messageId: responseMessageId } : {}
                 });
               }
               break;
@@ -4209,7 +4241,7 @@ var DefaultStreamTextResult = class {
               if (sendFinish) {
                 controller.enqueue({
                   type: "finish",
-                  messageMetadata: messageMetadataValue
+                  ...messageMetadataValue != null ? { messageMetadata: messageMetadataValue } : {}
                 });
               }
               break;
@@ -4383,7 +4415,7 @@ function readFileContent(filePath) {
 function parseArgs() {
   const args = process.argv.slice(2);
   const options = {
-    model: process.env.
+    model: process.env.AI_DEFAULT_MODEL || "openai/gpt-4",
     files: [],
     help: false,
     version: false,
@@ -4453,43 +4485,26 @@ function showHelp() {
 
 AI CLI - Stream text generation from various AI models
 
-Arguments:
-  prompt                   The prompt to send to the AI model (optional if using stdin)
-
 Options:
-  -m, --model <model>      Model to use
-
-
-  -
-  -
-  -
-  -
-  -V, --version            Output the version number
+  -m, --model <model>      Model to use (default: "openai/gpt-4")
+                           Format: provider/model (e.g., anthropic/claude-3-5-sonnet)
+  -f, --file <file>        Attach file(s) to prompt
+  -s, --system <message>   System message
+  -v, --verbose            Show detailed output
+  -h, --help               Show help
+  -V, --version            Show version
 
+Authentication (required):
+  export AI_GATEWAY_API_KEY="your-key"   # Get from Vercel Dashboard (AI tab)
+
 Environment Variables:
-
-
-
-
-  Authentication (choose one):
-  - VERCEL_OIDC_TOKEN: Vercel OIDC token (for Vercel projects)
-  - AI_GATEWAY_API_KEY: AI Gateway API key
-
-  Setting Environment Variables:
-  # Option 1: Export in current session
-  export AI_GATEWAY_API_KEY="your-key-here"
-  export AI_MODEL="anthropic/claude-3-5-sonnet-20241022"
-
-  # Option 2: Inline for single command
-  AI_GATEWAY_API_KEY="your-key" ai "Hello world"
-
-  # Option 3: Add to shell profile (~/.bashrc, ~/.zshrc)
-  echo 'export AI_GATEWAY_API_KEY="your-key"' >> ~/.bashrc
+  AI_DEFAULT_MODEL: Default model to use
+  AI_SYSTEM: Default system message
+  AI_VERBOSE: Set to 'true' for detailed output
 
 Examples:
   npx ai "Hello, world!"
-  npx ai "Write a poem" -m anthropic/claude-3-5-sonnet
-  npx ai "Explain quantum physics" -m groq/llama-3.1-8b-instant
+  npx ai "Write a poem" -m anthropic/claude-3-5-sonnet
   npx ai "Explain this code" -f script.js -f README.md
   echo "What is life?" | npx ai
   cat file.txt | npx ai "Summarize this content"
@@ -4499,19 +4514,10 @@ Unix-style piping:
   echo "Hello world" | npx ai "Translate to French"
   cat README.md | npx ai "Summarize this"
   curl -s https://api.github.com/repos/vercel/ai | npx ai "What is this repository about?"
-
-Authentication Setup:
-  This CLI uses the Vercel AI Gateway. You need ONE of these for authentication:
-
-  OIDC Token (for Vercel projects):
-  - Automatically available in Vercel deployments
-  - For local development: run 'vercel env pull' or use 'vercel dev'
 
-
-  - Get your key from the AI Gateway dashboard
-  - Set: export AI_GATEWAY_API_KEY="your-key-here"
+The gateway supports OpenAI, Anthropic, Google, Groq, and more providers.
 
-
+For detailed setup instructions, visit: https://ai-sdk.dev/docs/cli/authentication`);
 }
 function showVersion() {
   console.log("1.0.0");
@@ -4595,6 +4601,23 @@ ${prompt}` : stdinContent;
     }
     console.error("");
   }
+  const hasApiKey = process.env.AI_GATEWAY_API_KEY;
+  if (!hasApiKey) {
+    console.error(`Error: Authentication required.
+
+Set up authentication with one of these options:
+
+  # Option 1: Export in current session
+  export AI_GATEWAY_API_KEY="your-key-here"
+  export AI_DEFAULT_MODEL="anthropic/claude-3-5-sonnet"
+
+  # Option 2: Add to shell profile (~/.bashrc, ~/.zshrc)
+  echo 'export AI_GATEWAY_API_KEY="your-key"' >> ~/.bashrc
+
+Get your API key from the Vercel Dashboard (AI tab > API keys).
+Use --help for more details and examples.`);
+    process.exit(1);
+  }
   const model = resolveModel(options.model);
   let messages;
   if (imageFiles.length > 0) {
@@ -4655,6 +4678,7 @@ main().catch((error) => {
   formatAttachedFiles,
   getMediaType,
   isStdinAvailable,
+  main,
   parseArgs,
   readFileContent,
   resolveModel,