ai 4.1.26 → 4.1.28
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +14 -0
- package/dist/index.d.mts +25 -2
- package/dist/index.d.ts +25 -2
- package/dist/index.js +34 -7
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +34 -7
- package/dist/index.mjs.map +1 -1
- package/package.json +3 -3
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,19 @@
 # ai
 
+## 4.1.28
+
+### Patch Changes
+
+- 6eb7fc4: feat (ai/core): url source support
+
+## 4.1.27
+
+### Patch Changes
+
+- Updated dependencies [318b351]
+  - @ai-sdk/ui-utils@1.1.11
+  - @ai-sdk/react@1.1.11
+
 ## 4.1.26
 
 ### Patch Changes
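The `6eb7fc4` entry is the substantive change in this release: `generateText` and `streamText` now surface the URL sources a model consulted. A minimal consumption sketch follows; `@ai-sdk/google` with search grounding is assumed here purely as an example of a provider that emits URL sources, and `Source` fields beyond `url` are provider-dependent:

```ts
import { generateText } from 'ai';
// Assumption: any source-emitting provider works; Google search grounding
// is used only as an illustrative example.
import { google } from '@ai-sdk/google';

async function main() {
  const result = await generateText({
    model: google('gemini-1.5-flash', { useSearchGrounding: true }),
    prompt: 'Summarize recent AI SDK releases.',
  });

  // New in 4.1.28: URL sources used as input to the response,
  // accumulated across all steps of a multi-step generation.
  for (const source of result.sources) {
    console.log(source.url);
  }
}

main().catch(console.error);
```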
package/dist/index.d.mts
CHANGED
@@ -2,7 +2,7 @@ import { IDGenerator } from '@ai-sdk/provider-utils';
 export { CoreToolCall, CoreToolResult, IDGenerator, ToolCall, ToolResult, createIdGenerator, generateId } from '@ai-sdk/provider-utils';
 import { DataStreamString, Message, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
 export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UseAssistantOptions, formatAssistantStreamPart, formatDataStreamPart, jsonSchema, parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream, zodSchema } from '@ai-sdk/ui-utils';
-import { JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, LanguageModelV1CallOptions, AISDKError, LanguageModelV1FunctionToolCall, JSONSchema7, ProviderV1, NoSuchModelError } from '@ai-sdk/provider';
+import { JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1Source, LanguageModelV1ProviderMetadata, LanguageModelV1CallOptions, AISDKError, LanguageModelV1FunctionToolCall, JSONSchema7, ProviderV1, NoSuchModelError } from '@ai-sdk/provider';
 export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LanguageModelV1, LanguageModelV1CallOptions, LanguageModelV1Prompt, LanguageModelV1StreamPart, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
 import { ServerResponse } from 'node:http';
 import { AttributeValue, Tracer } from '@opentelemetry/api';
@@ -147,6 +147,10 @@ some settings might not be supported, which can lead to suboptimal results.
 */
 type CallWarning = LanguageModelV1CallWarning;
 /**
+A source that has been used as input to generate the response.
+ */
+type Source = LanguageModelV1Source;
+/**
 Tool choice for the generation. It supports the following settings:
 
 - `auto` (default): the model can choose whether and which tools to call.
@@ -1640,6 +1644,10 @@ type StepResult<TOOLS extends ToolSet> = {
 */
 readonly reasoning: string | undefined;
 /**
+The sources that were used to generate the text.
+ */
+readonly sources: Source[];
+/**
 The tool calls that were made during the generation.
 */
 readonly toolCalls: ToolCallArray<TOOLS>;
@@ -1738,6 +1746,11 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
 */
 readonly reasoning: string | undefined;
 /**
+Sources that have been used as input to generate the response.
+For multi-step generation, the sources are accumulated from all steps.
+ */
+readonly sources: Source[];
+/**
 The generated structured output. It uses the `experimental_output` specification.
 */
 readonly experimental_output: OUTPUT;
@@ -2039,6 +2052,13 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
 */
 readonly usage: Promise<LanguageModelUsage>;
 /**
+Sources that have been used as input to generate the response.
+For multi-step generation, the sources are accumulated from all steps.
+
+Resolved when the response is finished.
+ */
+readonly sources: Promise<Source[]>;
+/**
 The reason why the generation finished. Taken from the last step.
 
 Resolved when the response is finished.
@@ -2210,6 +2230,9 @@ type TextStreamPart<TOOLS extends ToolSet> = {
 } | {
 type: 'reasoning';
 textDelta: string;
+} | {
+type: 'source';
+source: Source;
 } | ({
 type: 'tool-call';
 } & ToolCallUnion<TOOLS>) | {
@@ -2414,7 +2437,7 @@ Callback that is called for each chunk of the stream. The stream processing will
 */
 onChunk?: (event: {
 chunk: Extract<TextStreamPart<TOOLS>, {
-type: 'text-delta' | 'reasoning' | 'tool-call' | 'tool-call-streaming-start' | 'tool-call-delta' | 'tool-result';
+type: 'text-delta' | 'reasoning' | 'source' | 'tool-call' | 'tool-call-streaming-start' | 'tool-call-delta' | 'tool-result';
 }>;
 }) => Promise<void> | void;
 /**
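The typing changes above introduce `Source`, thread it through `StepResult`, `GenerateTextResult`, and `StreamTextResult`, and widen the `onChunk` chunk union. A sketch of how the new surface reads from the streaming side; the OpenAI provider is a placeholder (substitute a model that actually emits sources):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // placeholder provider

async function main() {
  const result = streamText({
    model: openai('gpt-4o'),
    prompt: 'Answer with citations.',
    onChunk({ chunk }) {
      // 'source' is now a member of the onChunk chunk union.
      if (chunk.type === 'source') {
        console.log('cited:', chunk.source.url);
      }
    },
  });

  for await (const textPart of result.textStream) {
    process.stdout.write(textPart);
  }

  // Promise<Source[]>; resolves when the response is finished.
  const sources = await result.sources;
  console.log(`\n${sources.length} sources`);
}

main().catch(console.error);
```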
package/dist/index.d.ts
CHANGED
@@ -2,7 +2,7 @@ import { IDGenerator } from '@ai-sdk/provider-utils';
 export { CoreToolCall, CoreToolResult, IDGenerator, ToolCall, ToolResult, createIdGenerator, generateId } from '@ai-sdk/provider-utils';
 import { DataStreamString, Message, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
 export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UseAssistantOptions, formatAssistantStreamPart, formatDataStreamPart, jsonSchema, parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream, zodSchema } from '@ai-sdk/ui-utils';
-import { JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, LanguageModelV1CallOptions, AISDKError, LanguageModelV1FunctionToolCall, JSONSchema7, ProviderV1, NoSuchModelError } from '@ai-sdk/provider';
+import { JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1Source, LanguageModelV1ProviderMetadata, LanguageModelV1CallOptions, AISDKError, LanguageModelV1FunctionToolCall, JSONSchema7, ProviderV1, NoSuchModelError } from '@ai-sdk/provider';
 export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LanguageModelV1, LanguageModelV1CallOptions, LanguageModelV1Prompt, LanguageModelV1StreamPart, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
 import { ServerResponse } from 'node:http';
 import { AttributeValue, Tracer } from '@opentelemetry/api';
@@ -147,6 +147,10 @@ some settings might not be supported, which can lead to suboptimal results.
 */
 type CallWarning = LanguageModelV1CallWarning;
 /**
+A source that has been used as input to generate the response.
+ */
+type Source = LanguageModelV1Source;
+/**
 Tool choice for the generation. It supports the following settings:
 
 - `auto` (default): the model can choose whether and which tools to call.
@@ -1640,6 +1644,10 @@ type StepResult<TOOLS extends ToolSet> = {
 */
 readonly reasoning: string | undefined;
 /**
+The sources that were used to generate the text.
+ */
+readonly sources: Source[];
+/**
 The tool calls that were made during the generation.
 */
 readonly toolCalls: ToolCallArray<TOOLS>;
@@ -1738,6 +1746,11 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
 */
 readonly reasoning: string | undefined;
 /**
+Sources that have been used as input to generate the response.
+For multi-step generation, the sources are accumulated from all steps.
+ */
+readonly sources: Source[];
+/**
 The generated structured output. It uses the `experimental_output` specification.
 */
 readonly experimental_output: OUTPUT;
@@ -2039,6 +2052,13 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
 */
 readonly usage: Promise<LanguageModelUsage>;
 /**
+Sources that have been used as input to generate the response.
+For multi-step generation, the sources are accumulated from all steps.
+
+Resolved when the response is finished.
+ */
+readonly sources: Promise<Source[]>;
+/**
 The reason why the generation finished. Taken from the last step.
 
 Resolved when the response is finished.
@@ -2210,6 +2230,9 @@ type TextStreamPart<TOOLS extends ToolSet> = {
 } | {
 type: 'reasoning';
 textDelta: string;
+} | {
+type: 'source';
+source: Source;
 } | ({
 type: 'tool-call';
 } & ToolCallUnion<TOOLS>) | {
@@ -2414,7 +2437,7 @@ Callback that is called for each chunk of the stream. The stream processing will
 */
 onChunk?: (event: {
 chunk: Extract<TextStreamPart<TOOLS>, {
-type: 'text-delta' | 'reasoning' | 'tool-call' | 'tool-call-streaming-start' | 'tool-call-delta' | 'tool-result';
+type: 'text-delta' | 'reasoning' | 'source' | 'tool-call' | 'tool-call-streaming-start' | 'tool-call-delta' | 'tool-result';
 }>;
 }) => Promise<void> | void;
 /**
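`index.d.ts` mirrors `index.d.mts` for CJS consumers. One practical consequence of the widened `TextStreamPart` union is that `fullStream` switches gain a case; a sketch (placeholder provider again):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // placeholder provider

async function main() {
  const result = streamText({
    model: openai('gpt-4o'),
    prompt: 'Answer with citations.',
  });

  for await (const part of result.fullStream) {
    switch (part.type) {
      case 'text-delta':
        process.stdout.write(part.textDelta);
        break;
      case 'source': // new variant: { type: 'source'; source: Source }
        console.log('\nsource:', part.source.url);
        break;
      default:
        break;
    }
  }
}

main().catch(console.error);
```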
package/dist/index.js
CHANGED
@@ -3896,7 +3896,7 @@ async function generateText({
 }),
 tracer,
 fn: async (span) => {
-var _a16, _b, _c, _d, _e, _f, _g;
+var _a16, _b, _c, _d, _e, _f, _g, _h, _i;
 const mode = {
 type: "regular",
 ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
@@ -3908,6 +3908,7 @@ async function generateText({
 let stepCount = 0;
 const responseMessages = [];
 let text2 = "";
+const sources = [];
 const steps = [];
 let usage = {
 completionTokens: 0,
@@ -4056,6 +4057,7 @@ async function generateText({
 text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
 const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
 text2 = nextStepType === "continue" || stepType === "continue" ? text2 + stepText : stepText;
+sources.push(...(_d = currentModelResponse.sources) != null ? _d : []);
 if (stepType === "continue") {
 const lastMessage = responseMessages[responseMessages.length - 1];
 if (typeof lastMessage.content === "string") {
@@ -4082,16 +4084,17 @@ async function generateText({
 stepType,
 text: stepText,
 reasoning: currentModelResponse.reasoning,
+sources: (_e = currentModelResponse.sources) != null ? _e : [],
 toolCalls: currentToolCalls,
 toolResults: currentToolResults,
 finishReason: currentModelResponse.finishReason,
 usage: currentUsage,
 warnings: currentModelResponse.warnings,
 logprobs: currentModelResponse.logprobs,
-request: (_d = currentModelResponse.request) != null ? _d : {},
+request: (_f = currentModelResponse.request) != null ? _f : {},
 response: {
 ...currentModelResponse.response,
-headers: (_e = currentModelResponse.rawResponse) == null ? void 0 : _e.headers,
+headers: (_g = currentModelResponse.rawResponse) == null ? void 0 : _g.headers,
 // deep clone msgs to avoid mutating past messages in multi-step:
 messages: structuredClone(responseMessages)
 },
@@ -4121,6 +4124,7 @@ async function generateText({
 return new DefaultGenerateTextResult({
 text: text2,
 reasoning: currentModelResponse.reasoning,
+sources,
 outputResolver: () => {
 if (output == null) {
 throw new NoOutputSpecifiedError();
@@ -4135,10 +4139,10 @@ async function generateText({
 finishReason: currentModelResponse.finishReason,
 usage,
 warnings: currentModelResponse.warnings,
-request: (_f = currentModelResponse.request) != null ? _f : {},
+request: (_h = currentModelResponse.request) != null ? _h : {},
 response: {
 ...currentModelResponse.response,
-headers: (_g = currentModelResponse.rawResponse) == null ? void 0 : _g.headers,
+headers: (_i = currentModelResponse.rawResponse) == null ? void 0 : _i.headers,
 messages: responseMessages
 },
 logprobs: currentModelResponse.logprobs,
@@ -4238,6 +4242,7 @@ var DefaultGenerateTextResult = class {
 this.experimental_providerMetadata = options.providerMetadata;
 this.logprobs = options.logprobs;
 this.outputResolver = options.outputResolver;
+this.sources = options.sources;
 }
 get experimental_output() {
 return this.outputResolver();
@@ -4515,6 +4520,7 @@ function runToolsTransformation({
 switch (chunkType) {
 case "text-delta":
 case "reasoning":
+case "source":
 case "response-metadata":
 case "error": {
 controller.enqueue(chunk);
@@ -4828,6 +4834,7 @@ var DefaultStreamTextResult = class {
 this.providerMetadataPromise = new DelayedPromise();
 this.textPromise = new DelayedPromise();
 this.reasoningPromise = new DelayedPromise();
+this.sourcesPromise = new DelayedPromise();
 this.toolCallsPromise = new DelayedPromise();
 this.toolResultsPromise = new DelayedPromise();
 this.requestPromise = new DelayedPromise();
@@ -4846,6 +4853,8 @@ var DefaultStreamTextResult = class {
 let recordedContinuationText = "";
 let recordedFullText = "";
 let recordedReasoningText = void 0;
+let recordedStepSources = [];
+const recordedSources = [];
 const recordedResponse = {
 id: generateId3(),
 timestamp: currentDate(),
@@ -4863,7 +4872,7 @@ var DefaultStreamTextResult = class {
 async transform(chunk, controller) {
 controller.enqueue(chunk);
 const { part } = chunk;
-if (part.type === "text-delta" || part.type === "reasoning" || part.type === "tool-call" || part.type === "tool-result" || part.type === "tool-call-streaming-start" || part.type === "tool-call-delta") {
+if (part.type === "text-delta" || part.type === "reasoning" || part.type === "source" || part.type === "tool-call" || part.type === "tool-result" || part.type === "tool-call-streaming-start" || part.type === "tool-call-delta") {
 await (onChunk == null ? void 0 : onChunk({ chunk: part }));
 }
 if (part.type === "error") {
@@ -4877,6 +4886,10 @@ var DefaultStreamTextResult = class {
 if (part.type === "reasoning") {
 recordedReasoningText = (recordedReasoningText != null ? recordedReasoningText : "") + part.textDelta;
 }
+if (part.type === "source") {
+recordedSources.push(part.source);
+recordedStepSources.push(part.source);
+}
 if (part.type === "tool-call") {
 recordedToolCalls.push(part);
 }
@@ -4910,6 +4923,7 @@ var DefaultStreamTextResult = class {
 stepType,
 text: recordedStepText,
 reasoning: recordedReasoningText,
+sources: recordedStepSources,
 toolCalls: recordedToolCalls,
 toolResults: recordedToolResults,
 finishReason: part.finishReason,
@@ -4929,6 +4943,7 @@ var DefaultStreamTextResult = class {
 recordedToolCalls = [];
 recordedToolResults = [];
 recordedStepText = "";
+recordedStepSources = [];
 if (nextStepType !== "done") {
 stepType = nextStepType;
 }
@@ -4971,13 +4986,15 @@ var DefaultStreamTextResult = class {
 self.usagePromise.resolve(usage);
 self.textPromise.resolve(recordedFullText);
 self.reasoningPromise.resolve(recordedReasoningText);
+self.sourcesPromise.resolve(recordedSources);
 self.stepsPromise.resolve(recordedSteps);
 await (onFinish == null ? void 0 : onFinish({
 finishReason,
 logprobs: void 0,
 usage,
 text: recordedFullText,
-reasoning: recordedReasoningText,
+reasoning: lastStep.reasoning,
+sources: lastStep.sources,
 toolCalls: lastStep.toolCalls,
 toolResults: lastStep.toolResults,
 request: (_a16 = lastStep.request) != null ? _a16 : {},
@@ -5253,6 +5270,10 @@ var DefaultStreamTextResult = class {
 stepReasoning += chunk.textDelta;
 break;
 }
+case "source": {
+controller.enqueue(chunk);
+break;
+}
 case "tool-call": {
 controller.enqueue(chunk);
 stepToolCalls.push(chunk);
@@ -5469,6 +5490,9 @@ var DefaultStreamTextResult = class {
 get reasoning() {
 return this.reasoningPromise.value;
 }
+get sources() {
+return this.sourcesPromise.value;
+}
 get toolCalls() {
 return this.toolCallsPromise.value;
 }
@@ -5573,6 +5597,9 @@ var DefaultStreamTextResult = class {
 }
 break;
 }
+case "source": {
+break;
+}
 case "tool-call-streaming-start": {
 controller.enqueue(
 (0, import_ui_utils8.formatDataStreamPart)("tool_call_streaming_start", {
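The runtime changes mirror the typings: `generateText` accumulates `currentModelResponse.sources` across steps, while `streamText` keeps a per-step buffer (`recordedStepSources`, reset after each step) alongside a run-level array (`recordedSources`) that resolves the `sources` promise. Note that the `onFinish` callback receives `lastStep.sources`, not the accumulated array. A sketch of what that distinction means for callers (placeholder provider, assumed to emit sources):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // placeholder provider

async function main() {
  const result = streamText({
    model: openai('gpt-4o'),
    prompt: 'Research and answer.',
    maxSteps: 3,
    onStepFinish(step) {
      // StepResult.sources: this step's sources only; the internal
      // per-step buffer resets between steps (see the diff above).
      console.log('step sources:', step.sources.length);
    },
    onFinish({ sources }) {
      // Per the diff, onFinish receives lastStep.sources (final step only),
      // not the run-level accumulation.
      console.log('final-step sources:', sources.length);
    },
  });

  // Drain the stream so the run completes.
  for await (const _chunk of result.textStream) {
    // intentionally empty: consuming the stream drives the generation
  }

  // The result-level promise resolves with sources from *all* steps.
  console.log('all sources:', (await result.sources).length);
}

main().catch(console.error);
```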