ai 4.1.6 → 4.1.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +16 -0
- package/dist/index.d.mts +13 -1
- package/dist/index.d.ts +13 -1
- package/dist/index.js +122 -16
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +113 -8
- package/dist/index.mjs.map +1 -1
- package/package.json +4 -4
- package/rsc/dist/rsc-server.mjs +1 -6
- package/rsc/dist/rsc-server.mjs.map +1 -1
- package/test/dist/index.js +3 -7
- package/test/dist/index.js.map +1 -1
- package/test/dist/index.mjs +3 -7
- package/test/dist/index.mjs.map +1 -1
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,21 @@
 # ai
 
+## 4.1.8
+
+### Patch Changes
+
+- 92f5f36: feat (core): add extractReasoningMiddleware
+
+## 4.1.7
+
+### Patch Changes
+
+- 066206e: feat (provider-utils): move delay to provider-utils from ai
+- Updated dependencies [066206e]
+  - @ai-sdk/provider-utils@2.1.4
+  - @ai-sdk/react@1.1.5
+  - @ai-sdk/ui-utils@1.1.5
+
 ## 4.1.6
 
 ### Patch Changes
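The 4.1.7 entry moves the `delay` helper out of `ai` into `@ai-sdk/provider-utils`; the bundled diffs below replace the local `util/delay.ts` implementation with the re-exported helper. A minimal sketch of calling it from its new location, assuming only the behavior visible in the removed implementation (a nullish argument resolves immediately, otherwise it resolves after the given number of milliseconds):

import { delay } from '@ai-sdk/provider-utils';

// Sketch: space two calls apart. Per the removed util/delay.ts below,
// delay(undefined) resolves immediately and delay(n) resolves after ~n ms.
async function pingTwice(ping: () => Promise<void>) {
  await ping();
  await delay(250); // ~250 ms pause
  await ping();
}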
package/dist/index.d.mts
CHANGED
@@ -2387,6 +2387,18 @@ declare const experimental_wrapLanguageModel: ({ model, middleware: { transformP
     providerId?: string;
 }) => LanguageModelV1;
 
+/**
+ * Extract an XML-tagged reasoning section from the generated text and exposes it
+ * as a `reasoning` property on the result.
+ *
+ * @param tagName - The name of the XML tag to extract reasoning from.
+ * @param separator - The separator to use between reasoning and text sections.
+ */
+declare function extractReasoningMiddleware({ tagName, separator, }: {
+    tagName: string;
+    separator?: string;
+}): Experimental_LanguageModelV1Middleware;
+
 /**
  * Creates a custom provider with specified language models, text embedding models, and an optional fallback provider.
  *
@@ -2768,4 +2780,4 @@ declare namespace llamaindexAdapter {
     };
 }
 
-export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedImage as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, UserContent, appendClientMessage, appendResponseMessages, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, smoothStream, streamObject, streamText, tool };
+export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedImage as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, UserContent, appendClientMessage, appendResponseMessages, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, extractReasoningMiddleware, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, smoothStream, streamObject, streamText, tool };
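The new `extractReasoningMiddleware` is designed to be passed to the `experimental_wrapLanguageModel` export declared just above it. A usage sketch based on these declarations: `baseModel`, the `'think'` tag name, and the prompt are placeholders, and destructuring `reasoning` from the `generateText` result assumes the result type exposes the field that the middleware adds to the provider result.

import {
  experimental_wrapLanguageModel,
  extractReasoningMiddleware,
  generateText,
  type LanguageModel,
} from 'ai';

// Placeholder: any provider model that emits <think>...</think> sections in its text.
declare const baseModel: LanguageModel;

// Wrap the model so <think> sections are stripped from `text` and surfaced separately.
const model = experimental_wrapLanguageModel({
  model: baseModel,
  middleware: extractReasoningMiddleware({ tagName: 'think' }),
});

async function main() {
  const { text, reasoning } = await generateText({
    model,
    prompt: 'Solve the riddle, then state only the final answer.',
  });
  console.log(reasoning); // contents of the <think> block(s), joined by the separator
  console.log(text); // remaining text with the tagged sections removed
}

When streaming, the same wrapped model re-emits the tagged sections as `reasoning` stream parts instead of `text-delta` parts, as the implementation in package/dist/index.js below shows.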
package/dist/index.d.ts
CHANGED
@@ -2387,6 +2387,18 @@ declare const experimental_wrapLanguageModel: ({ model, middleware: { transformP
     providerId?: string;
 }) => LanguageModelV1;
 
+/**
+ * Extract an XML-tagged reasoning section from the generated text and exposes it
+ * as a `reasoning` property on the result.
+ *
+ * @param tagName - The name of the XML tag to extract reasoning from.
+ * @param separator - The separator to use between reasoning and text sections.
+ */
+declare function extractReasoningMiddleware({ tagName, separator, }: {
+    tagName: string;
+    separator?: string;
+}): Experimental_LanguageModelV1Middleware;
+
 /**
  * Creates a custom provider with specified language models, text embedding models, and an optional fallback provider.
  *
@@ -2768,4 +2780,4 @@ declare namespace llamaindexAdapter {
     };
 }
 
-export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedImage as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, UserContent, appendClientMessage, appendResponseMessages, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, smoothStream, streamObject, streamText, tool };
+export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedImage as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, UserContent, appendClientMessage, appendResponseMessages, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, extractReasoningMiddleware, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, smoothStream, streamObject, streamText, tool };
package/dist/index.js
CHANGED
@@ -61,16 +61,17 @@ __export(streams_exports, {
   cosineSimilarity: () => cosineSimilarity,
   createDataStream: () => createDataStream,
   createDataStreamResponse: () => createDataStreamResponse,
-  createIdGenerator: () =>
+  createIdGenerator: () => import_provider_utils14.createIdGenerator,
   embed: () => embed,
   embedMany: () => embedMany,
   experimental_createProviderRegistry: () => experimental_createProviderRegistry,
   experimental_customProvider: () => experimental_customProvider,
   experimental_generateImage: () => generateImage,
   experimental_wrapLanguageModel: () => experimental_wrapLanguageModel,
+  extractReasoningMiddleware: () => extractReasoningMiddleware,
   formatAssistantStreamPart: () => import_ui_utils9.formatAssistantStreamPart,
   formatDataStreamPart: () => import_ui_utils9.formatDataStreamPart,
-  generateId: () =>
+  generateId: () => import_provider_utils14.generateId,
   generateObject: () => generateObject,
   generateText: () => generateText,
   jsonSchema: () => import_ui_utils9.jsonSchema,
@@ -88,7 +89,7 @@ __export(streams_exports, {
 module.exports = __toCommonJS(streams_exports);
 
 // core/index.ts
-var
+var import_provider_utils14 = require("@ai-sdk/provider-utils");
 var import_ui_utils9 = require("@ai-sdk/ui-utils");
 
 // core/data-stream/create-data-stream.ts
@@ -296,11 +297,6 @@ _a = symbol;
 var import_provider3 = require("@ai-sdk/provider");
 var import_provider_utils = require("@ai-sdk/provider-utils");
 
-// util/delay.ts
-async function delay(delayInMs) {
-  return delayInMs == null ? Promise.resolve() : new Promise((resolve) => setTimeout(resolve, delayInMs));
-}
-
 // util/retry-error.ts
 var import_provider2 = require("@ai-sdk/provider");
 var name2 = "AI_RetryError";
@@ -360,7 +356,7 @@ async function _retryWithExponentialBackoff(f, {
       });
     }
     if (error instanceof Error && import_provider3.APICallError.isInstance(error) && error.isRetryable === true && tryNumber <= maxRetries) {
-      await delay(delayInMs);
+      await (0, import_provider_utils.delay)(delayInMs);
       return _retryWithExponentialBackoff(
         f,
         { maxRetries, delayInMs: backoffFactor * delayInMs, backoffFactor },
@@ -4204,6 +4200,7 @@ var object = ({
 
 // core/generate-text/smooth-stream.ts
 var import_provider18 = require("@ai-sdk/provider");
+var import_provider_utils11 = require("@ai-sdk/provider-utils");
 var CHUNKING_REGEXPS = {
   word: /\s*\S+\s+/m,
   line: /[^\n]*\n/m
@@ -4211,7 +4208,7 @@ var CHUNKING_REGEXPS = {
 function smoothStream({
   delayInMs = 10,
   chunking = "word",
-  _internal: { delay: delay2 = delay } = {}
+  _internal: { delay: delay2 = import_provider_utils11.delay } = {}
 } = {}) {
   const chunkingRegexp = typeof chunking === "string" ? CHUNKING_REGEXPS[chunking] : chunking;
   if (chunkingRegexp == null) {
@@ -4250,7 +4247,7 @@ function smoothStream({
 }
 
 // core/generate-text/stream-text.ts
-var
+var import_provider_utils12 = require("@ai-sdk/provider-utils");
 var import_ui_utils8 = require("@ai-sdk/ui-utils");
 
 // util/as-array.ts
@@ -4538,11 +4535,11 @@ function runToolsTransformation({
 }
 
 // core/generate-text/stream-text.ts
-var originalGenerateId4 = (0,
+var originalGenerateId4 = (0, import_provider_utils12.createIdGenerator)({
   prefix: "aitxt",
   size: 24
 });
-var originalGenerateMessageId2 = (0,
+var originalGenerateMessageId2 = (0, import_provider_utils12.createIdGenerator)({
   prefix: "msg",
   size: 24
 });
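For context, `createIdGenerator` (now re-exported from `@ai-sdk/provider-utils`) is configured once and then called per ID, exactly as the bundled stream-text code above does. A small sketch using the same options; the exact separator and output alphabet are whatever the library defaults to:

import { createIdGenerator } from '@ai-sdk/provider-utils';

// Same options the bundled stream-text code uses for its text-generation IDs.
const generateTextId = createIdGenerator({ prefix: 'aitxt', size: 24 });

const id = generateTextId(); // a prefixed ID with 24 random characters, e.g. "aitxt-..."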
@@ -5634,6 +5631,113 @@ var experimental_wrapLanguageModel = ({
   };
 };
 
+// core/util/get-potential-start-index.ts
+function getPotentialStartIndex(text2, searchedText) {
+  if (searchedText.length === 0) {
+    return null;
+  }
+  const directIndex = text2.indexOf(searchedText);
+  if (directIndex !== -1) {
+    return directIndex;
+  }
+  for (let i = text2.length - 1; i >= 0; i--) {
+    const suffix = text2.substring(i);
+    if (searchedText.startsWith(suffix)) {
+      return i;
+    }
+  }
+  return null;
+}
+
+// core/middleware/extract-reasoning-middleware.ts
+function extractReasoningMiddleware({
+  tagName,
+  separator = "\n"
+}) {
+  const openingTag = `<${tagName}>`;
+  const closingTag = `</${tagName}>`;
+  return {
+    wrapGenerate: async ({ doGenerate }) => {
+      const { text: text2, ...rest } = await doGenerate();
+      if (text2 == null) {
+        return { text: text2, ...rest };
+      }
+      const regexp = new RegExp(`${openingTag}(.*?)${closingTag}`, "gs");
+      const matches = Array.from(text2.matchAll(regexp));
+      if (!matches.length) {
+        return { text: text2, ...rest };
+      }
+      const reasoning = matches.map((match) => match[1]).join(separator);
+      let textWithoutReasoning = text2;
+      for (let i = matches.length - 1; i >= 0; i--) {
+        const match = matches[i];
+        const beforeMatch = textWithoutReasoning.slice(0, match.index);
+        const afterMatch = textWithoutReasoning.slice(
+          match.index + match[0].length
+        );
+        textWithoutReasoning = beforeMatch + (beforeMatch.length > 0 && afterMatch.length > 0 ? separator : "") + afterMatch;
+      }
+      return { text: textWithoutReasoning, reasoning, ...rest };
+    },
+    wrapStream: async ({ doStream }) => {
+      const { stream, ...rest } = await doStream();
+      let isFirstReasoning = true;
+      let isFirstText = true;
+      let afterSwitch = false;
+      let isReasoning = false;
+      let buffer = "";
+      return {
+        stream: stream.pipeThrough(
+          new TransformStream({
+            transform: (chunk, controller) => {
+              if (chunk.type !== "text-delta") {
+                controller.enqueue(chunk);
+                return;
+              }
+              buffer += chunk.textDelta;
+              function publish(text2) {
+                if (text2.length > 0) {
+                  const prefix = afterSwitch && (isReasoning ? !isFirstReasoning : !isFirstText) ? separator : "";
+                  controller.enqueue({
+                    type: isReasoning ? "reasoning" : "text-delta",
+                    textDelta: prefix + text2
+                  });
+                  afterSwitch = false;
+                  if (isReasoning) {
+                    isFirstReasoning = false;
+                  } else {
+                    isFirstText = false;
+                  }
+                }
+              }
+              do {
+                const nextTag = isReasoning ? closingTag : openingTag;
+                const startIndex = getPotentialStartIndex(buffer, nextTag);
+                if (startIndex == null) {
+                  publish(buffer);
+                  buffer = "";
+                  break;
+                }
+                publish(buffer.slice(0, startIndex));
+                const foundFullMatch = startIndex + nextTag.length <= buffer.length;
+                if (foundFullMatch) {
+                  buffer = buffer.slice(startIndex + nextTag.length);
+                  isReasoning = !isReasoning;
+                  afterSwitch = true;
+                } else {
+                  buffer = buffer.slice(startIndex);
+                  break;
+                }
+              } while (true);
+            }
+          })
+        ),
+        ...rest
+      };
+    }
+  };
+}
+
 // core/prompt/append-client-message.ts
 function appendClientMessage({
   messages,
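The `getPotentialStartIndex` helper added in this hunk is what lets `wrapStream` hold back a partially received tag: it returns the index of a complete match, or the index where the buffer's tail could still turn into the tag, or `null` when the buffer can be flushed. An illustrative restatement of that contract (not part of the package's public API):

// Restatement for illustration; the real helper lives in
// core/util/get-potential-start-index.ts and is not exported.
function getPotentialStartIndex(text: string, searchedText: string): number | null {
  if (searchedText.length === 0) return null;
  const directIndex = text.indexOf(searchedText);
  if (directIndex !== -1) return directIndex;
  for (let i = text.length - 1; i >= 0; i--) {
    if (searchedText.startsWith(text.substring(i))) return i; // buffer may end mid-tag
  }
  return null;
}

getPotentialStartIndex('abc<think>def', '<think>');   // 3    -> full tag found, switch modes
getPotentialStartIndex('some text</thi', '</think>'); // 9    -> partial tag, keep it buffered
getPotentialStartIndex('plain text', '</think>');     // null -> flush the whole buffer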
@@ -5864,6 +5968,7 @@ function magnitude(vector) {
 }
 
 // core/util/simulate-readable-stream.ts
+var import_provider_utils13 = require("@ai-sdk/provider-utils");
 function simulateReadableStream({
   chunks,
   initialDelayInMs = 0,
@@ -5871,7 +5976,7 @@ function simulateReadableStream({
   _internal
 }) {
   var _a15;
-  const delay2 = (_a15 = _internal == null ? void 0 : _internal.delay) != null ? _a15 : delay;
+  const delay2 = (_a15 = _internal == null ? void 0 : _internal.delay) != null ? _a15 : import_provider_utils13.delay;
   let index = 0;
   return new ReadableStream({
     async pull(controller) {
@@ -6095,11 +6200,11 @@ __export(llamaindex_adapter_exports, {
   toDataStream: () => toDataStream2,
   toDataStreamResponse: () => toDataStreamResponse2
 });
-var
+var import_provider_utils15 = require("@ai-sdk/provider-utils");
 var import_ui_utils12 = require("@ai-sdk/ui-utils");
 function toDataStreamInternal2(stream, callbacks) {
   const trimStart = trimStartOfStream();
-  return (0,
+  return (0, import_provider_utils15.convertAsyncIteratorToReadableStream)(stream[Symbol.asyncIterator]()).pipeThrough(
     new TransformStream({
       async transform(message, controller) {
         controller.enqueue(trimStart(message.delta));
@@ -6267,6 +6372,7 @@ var StreamData = class {
   experimental_customProvider,
   experimental_generateImage,
   experimental_wrapLanguageModel,
+  extractReasoningMiddleware,
   formatAssistantStreamPart,
   formatDataStreamPart,
   generateId,