ai 3.1.1 → 3.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +39 -4
- package/dist/index.d.ts +39 -4
- package/dist/index.js +35 -3
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +35 -3
- package/dist/index.mjs.map +1 -1
- package/package.json +2 -2
- package/react/dist/index.js +2 -0
- package/react/dist/index.js.map +1 -1
- package/react/dist/index.mjs +2 -0
- package/react/dist/index.mjs.map +1 -1
- package/rsc/dist/rsc-shared.mjs.map +1 -1
package/dist/index.d.mts
CHANGED
@@ -303,7 +303,16 @@ The schema of the object that the model should generate.
|
|
303
303
|
*/
|
304
304
|
schema: z.Schema<T>;
|
305
305
|
/**
|
306
|
-
The mode to use for object generation.
|
306
|
+
The mode to use for object generation.
|
307
|
+
|
308
|
+
The Zod schema is converted into a JSON schema and used in one of the following ways
|
309
|
+
|
310
|
+
- 'auto': The provider will choose the best mode for the model.
|
311
|
+
- 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
|
312
|
+
- 'json': The JSON schema and an instruction is injected into the prompt. If the provider supports JSON mode, it is enabled.
|
313
|
+
- 'grammar': The provider is instructed to convert the JSON schema into a provider-specific grammar and use it to select the output tokens.
|
314
|
+
|
315
|
+
Please note that most providers do not support all modes.
|
307
316
|
|
308
317
|
Default and recommended: 'auto' (best mode for the model).
|
309
318
|
*/
|
@@ -423,10 +432,19 @@ The schema of the object that the model should generate.
|
|
423
432
|
*/
|
424
433
|
schema: z.Schema<T>;
|
425
434
|
/**
|
426
|
-
The mode to use for object generation.
|
435
|
+
The mode to use for object generation.
|
436
|
+
|
437
|
+
The Zod schema is converted into a JSON schema and used in one of the following ways
|
438
|
+
|
439
|
+
- 'auto': The provider will choose the best mode for the model.
|
440
|
+
- 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
|
441
|
+
- 'json': The JSON schema and an instruction is injected into the prompt. If the provider supports JSON mode, it is enabled.
|
442
|
+
- 'grammar': The provider is instructed to convert the JSON schema into a provider-specific grammar and use it to select the output tokens.
|
443
|
+
|
444
|
+
Please note that most providers do not support all modes.
|
427
445
|
|
428
446
|
Default and recommended: 'auto' (best mode for the model).
|
429
|
-
|
447
|
+
*/
|
430
448
|
mode?: 'auto' | 'json' | 'tool' | 'grammar';
|
431
449
|
}): Promise<StreamObjectResult<T>>;
|
432
450
|
type ObjectStreamPartInput = {
|
@@ -764,12 +782,20 @@ type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
|
|
764
782
|
A result object for accessing different stream types and additional information.
|
765
783
|
*/
|
766
784
|
declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
|
767
|
-
private
|
785
|
+
private originalStream;
|
768
786
|
/**
|
769
787
|
Warnings from the model provider (e.g. unsupported settings)
|
770
788
|
*/
|
771
789
|
readonly warnings: CallWarning[] | undefined;
|
772
790
|
/**
|
791
|
+
The token usage of the generated text. Resolved when the response is finished.
|
792
|
+
*/
|
793
|
+
readonly usage: Promise<TokenUsage>;
|
794
|
+
/**
|
795
|
+
The reason why the generation finished. Resolved when the response is finished.
|
796
|
+
*/
|
797
|
+
readonly finishReason: Promise<FinishReason>;
|
798
|
+
/**
|
773
799
|
Optional raw response data.
|
774
800
|
*/
|
775
801
|
rawResponse?: {
|
@@ -786,6 +812,15 @@ declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
|
|
786
812
|
};
|
787
813
|
});
|
788
814
|
/**
|
815
|
+
Split out a new stream from the original stream.
|
816
|
+
The original stream is replaced to allow for further splitting,
|
817
|
+
since we do not know how many times the stream will be split.
|
818
|
+
|
819
|
+
Note: this leads to buffering the stream content on the server.
|
820
|
+
However, the LLM results are expected to be small enough to not cause issues.
|
821
|
+
*/
|
822
|
+
private teeStream;
|
823
|
+
/**
|
789
824
|
A text stream that returns only the generated text deltas. You can use it
|
790
825
|
as either an AsyncIterable or a ReadableStream. When an error occurs, the
|
791
826
|
stream will throw the error.
|
package/dist/index.d.ts
CHANGED
@@ -303,7 +303,16 @@ The schema of the object that the model should generate.
|
|
303
303
|
*/
|
304
304
|
schema: z.Schema<T>;
|
305
305
|
/**
|
306
|
-
The mode to use for object generation.
|
306
|
+
The mode to use for object generation.
|
307
|
+
|
308
|
+
The Zod schema is converted into a JSON schema and used in one of the following ways
|
309
|
+
|
310
|
+
- 'auto': The provider will choose the best mode for the model.
|
311
|
+
- 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
|
312
|
+
- 'json': The JSON schema and an instruction is injected into the prompt. If the provider supports JSON mode, it is enabled.
|
313
|
+
- 'grammar': The provider is instructed to convert the JSON schema into a provider-specific grammar and use it to select the output tokens.
|
314
|
+
|
315
|
+
Please note that most providers do not support all modes.
|
307
316
|
|
308
317
|
Default and recommended: 'auto' (best mode for the model).
|
309
318
|
*/
|
@@ -423,10 +432,19 @@ The schema of the object that the model should generate.
|
|
423
432
|
*/
|
424
433
|
schema: z.Schema<T>;
|
425
434
|
/**
|
426
|
-
The mode to use for object generation.
|
435
|
+
The mode to use for object generation.
|
436
|
+
|
437
|
+
The Zod schema is converted into a JSON schema and used in one of the following ways
|
438
|
+
|
439
|
+
- 'auto': The provider will choose the best mode for the model.
|
440
|
+
- 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
|
441
|
+
- 'json': The JSON schema and an instruction is injected into the prompt. If the provider supports JSON mode, it is enabled.
|
442
|
+
- 'grammar': The provider is instructed to convert the JSON schema into a provider-specific grammar and use it to select the output tokens.
|
443
|
+
|
444
|
+
Please note that most providers do not support all modes.
|
427
445
|
|
428
446
|
Default and recommended: 'auto' (best mode for the model).
|
429
|
-
|
447
|
+
*/
|
430
448
|
mode?: 'auto' | 'json' | 'tool' | 'grammar';
|
431
449
|
}): Promise<StreamObjectResult<T>>;
|
432
450
|
type ObjectStreamPartInput = {
|
@@ -764,12 +782,20 @@ type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
|
|
764
782
|
A result object for accessing different stream types and additional information.
|
765
783
|
*/
|
766
784
|
declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
|
767
|
-
private
|
785
|
+
private originalStream;
|
768
786
|
/**
|
769
787
|
Warnings from the model provider (e.g. unsupported settings)
|
770
788
|
*/
|
771
789
|
readonly warnings: CallWarning[] | undefined;
|
772
790
|
/**
|
791
|
+
The token usage of the generated text. Resolved when the response is finished.
|
792
|
+
*/
|
793
|
+
readonly usage: Promise<TokenUsage>;
|
794
|
+
/**
|
795
|
+
The reason why the generation finished. Resolved when the response is finished.
|
796
|
+
*/
|
797
|
+
readonly finishReason: Promise<FinishReason>;
|
798
|
+
/**
|
773
799
|
Optional raw response data.
|
774
800
|
*/
|
775
801
|
rawResponse?: {
|
@@ -786,6 +812,15 @@ declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
|
|
786
812
|
};
|
787
813
|
});
|
788
814
|
/**
|
815
|
+
Split out a new stream from the original stream.
|
816
|
+
The original stream is replaced to allow for further splitting,
|
817
|
+
since we do not know how many times the stream will be split.
|
818
|
+
|
819
|
+
Note: this leads to buffering the stream content on the server.
|
820
|
+
However, the LLM results are expected to be small enough to not cause issues.
|
821
|
+
*/
|
822
|
+
private teeStream;
|
823
|
+
/**
|
789
824
|
A text stream that returns only the generated text deltas. You can use it
|
790
825
|
as either an AsyncIterable or a ReadableStream. When an error occurs, the
|
791
826
|
stream will throw the error.
|
package/dist/index.js
CHANGED
@@ -755,6 +755,7 @@ function fixJson(input) {
|
|
755
755
|
break;
|
756
756
|
}
|
757
757
|
case "}": {
|
758
|
+
lastValidIndex = i;
|
758
759
|
stack.pop();
|
759
760
|
break;
|
760
761
|
}
|
@@ -1486,9 +1487,40 @@ var StreamTextResult = class {
|
|
1486
1487
|
warnings,
|
1487
1488
|
rawResponse
|
1488
1489
|
}) {
|
1489
|
-
this.originalStream = stream;
|
1490
1490
|
this.warnings = warnings;
|
1491
1491
|
this.rawResponse = rawResponse;
|
1492
|
+
let resolveUsage;
|
1493
|
+
this.usage = new Promise((resolve) => {
|
1494
|
+
resolveUsage = resolve;
|
1495
|
+
});
|
1496
|
+
let resolveFinishReason;
|
1497
|
+
this.finishReason = new Promise((resolve) => {
|
1498
|
+
resolveFinishReason = resolve;
|
1499
|
+
});
|
1500
|
+
this.originalStream = stream.pipeThrough(
|
1501
|
+
new TransformStream({
|
1502
|
+
async transform(chunk, controller) {
|
1503
|
+
controller.enqueue(chunk);
|
1504
|
+
if (chunk.type === "finish") {
|
1505
|
+
resolveUsage(chunk.usage);
|
1506
|
+
resolveFinishReason(chunk.finishReason);
|
1507
|
+
}
|
1508
|
+
}
|
1509
|
+
})
|
1510
|
+
);
|
1511
|
+
}
|
1512
|
+
/**
|
1513
|
+
Split out a new stream from the original stream.
|
1514
|
+
The original stream is replaced to allow for further splitting,
|
1515
|
+
since we do not know how many times the stream will be split.
|
1516
|
+
|
1517
|
+
Note: this leads to buffering the stream content on the server.
|
1518
|
+
However, the LLM results are expected to be small enough to not cause issues.
|
1519
|
+
*/
|
1520
|
+
teeStream() {
|
1521
|
+
const [stream1, stream2] = this.originalStream.tee();
|
1522
|
+
this.originalStream = stream2;
|
1523
|
+
return stream1;
|
1492
1524
|
}
|
1493
1525
|
/**
|
1494
1526
|
A text stream that returns only the generated text deltas. You can use it
|
@@ -1496,7 +1528,7 @@ var StreamTextResult = class {
|
|
1496
1528
|
stream will throw the error.
|
1497
1529
|
*/
|
1498
1530
|
get textStream() {
|
1499
|
-
return createAsyncIterableStream(this.
|
1531
|
+
return createAsyncIterableStream(this.teeStream(), {
|
1500
1532
|
transform(chunk, controller) {
|
1501
1533
|
if (chunk.type === "text-delta") {
|
1502
1534
|
if (chunk.textDelta.length > 0) {
|
@@ -1515,7 +1547,7 @@ var StreamTextResult = class {
|
|
1515
1547
|
stream will throw the error.
|
1516
1548
|
*/
|
1517
1549
|
get fullStream() {
|
1518
|
-
return createAsyncIterableStream(this.
|
1550
|
+
return createAsyncIterableStream(this.teeStream(), {
|
1519
1551
|
transform(chunk, controller) {
|
1520
1552
|
if (chunk.type === "text-delta") {
|
1521
1553
|
if (chunk.textDelta.length > 0) {
|