ai 4.0.7 → 4.0.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +84 -35
- package/dist/index.d.ts +84 -35
- package/dist/index.js +345 -200
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +313 -170
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,17 @@
 # ai
 
+## 4.0.9
+
+### Patch Changes
+
+- fda9695: feat (ai/core): reworked data stream management
+
+## 4.0.8
+
+### Patch Changes
+
+- a803d76: feat (ai/core): pass toolCallId option into tool execute function
+
 ## 4.0.7
 
 ### Patch Changes
package/dist/index.d.mts
CHANGED
@@ -1,12 +1,48 @@
-import { ToolInvocation, Attachment, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
+import { DataStreamString, ToolInvocation, Attachment, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
 export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UseAssistantOptions, formatAssistantStreamPart, formatDataStreamPart, jsonSchema, parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream } from '@ai-sdk/ui-utils';
 export { ToolCall as CoreToolCall, ToolResult as CoreToolResult, generateId } from '@ai-sdk/provider-utils';
-import {
-import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, JSONValue, LanguageModelV1CallOptions, NoSuchModelError, AISDKError } from '@ai-sdk/provider';
+import { JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, LanguageModelV1CallOptions, NoSuchModelError, AISDKError } from '@ai-sdk/provider';
 export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LanguageModelV1, LanguageModelV1CallOptions, LanguageModelV1Prompt, LanguageModelV1StreamPart, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
+import { ServerResponse } from 'node:http';
+import { AttributeValue, Tracer } from '@opentelemetry/api';
 import { z } from 'zod';
-import { ServerResponse } from 'http';
-
+import { ServerResponse as ServerResponse$1 } from 'http';
+
+interface DataStreamWriter {
+/**
+ * Appends a data part to the stream.
+ */
+writeData(value: JSONValue): void;
+/**
+ * Appends a message annotation to the stream.
+ */
+writeMessageAnnotation(value: JSONValue): void;
+/**
+ * Merges the contents of another stream to this stream.
+ */
+merge(stream: ReadableStream<DataStreamString>): void;
+/**
+ * Error handler that is used by the data stream writer.
+ * This is intended for forwarding when merging streams
+ * to prevent duplicated error masking.
+ */
+onError: ((error: unknown) => string) | undefined;
+}
+
+declare function createDataStream({ execute, onError, }: {
+execute: (dataStream: DataStreamWriter) => Promise<void> | void;
+onError?: (error: unknown) => string;
+}): ReadableStream<DataStreamString>;
+
+declare function createDataStreamResponse({ status, statusText, headers, execute, onError, }: ResponseInit & {
+execute: (dataStream: DataStreamWriter) => Promise<void> | void;
+onError?: (error: unknown) => string;
+}): Response;
+
+declare function pipeDataStreamToResponse(response: ServerResponse, { status, statusText, headers, execute, onError, }: ResponseInit & {
+execute: (writer: DataStreamWriter) => Promise<void> | void;
+onError?: (error: unknown) => string;
+}): void;
 
 /**
  * Telemetry configuration.
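
The `DataStreamWriter` and `createDataStreamResponse` declarations added above suggest usage along these lines. This is a minimal sketch, not taken from the package docs: the route handler shape and the data payloads are illustrative assumptions.

```ts
import { createDataStreamResponse } from 'ai';

// Hypothetical route handler; the payload values are made up for illustration.
export async function POST(_req: Request): Promise<Response> {
  return createDataStreamResponse({
    status: 200,
    execute: async dataStream => {
      // Append a custom JSON data part and a message annotation to the stream.
      dataStream.writeData({ stage: 'started' });
      dataStream.writeMessageAnnotation({ source: 'example' });
    },
    // Convert errors into the string that is sent to the client.
    onError: error => (error instanceof Error ? error.message : 'unknown error'),
  });
}
```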
@@ -943,7 +979,7 @@ interface StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM> {
 @param response A Node.js response-like object (ServerResponse).
 @param init Optional headers, status code, and status text.
 */
-pipeTextStreamToResponse(response: ServerResponse, init?: ResponseInit): void;
+pipeTextStreamToResponse(response: ServerResponse$1, init?: ResponseInit): void;
 /**
 Creates a simple text stream response.
 The response has a `Content-Type` header set to `text/plain; charset=utf-8`.
@@ -1178,6 +1214,21 @@ Callback that is called when the LLM response and the final object validation ar
 
 type Parameters = z.ZodTypeAny | Schema<any>;
 type inferParameters<PARAMETERS extends Parameters> = PARAMETERS extends Schema<any> ? PARAMETERS['_type'] : PARAMETERS extends z.ZodTypeAny ? z.infer<PARAMETERS> : never;
+interface ToolExecutionOptions {
+/**
+ * The ID of the tool call. You can use it e.g. when sending tool-call related information with stream data.
+ */
+toolCallId: string;
+/**
+ * Messages that were sent to the language model to initiate the response that contained the tool call.
+ * The messages **do not** include the system prompt nor the assistant response that contained the tool call.
+ */
+messages: CoreMessage[];
+/**
+ * An optional abort signal that indicates that the overall operation should be aborted.
+ */
+abortSignal?: AbortSignal;
+}
 /**
 A tool contains the description and the schema of the input that the tool expects.
 This enables the language model to generate the input.
@@ -1202,17 +1253,7 @@ type CoreTool<PARAMETERS extends Parameters = any, RESULT = any> = {
 @args is the input of the tool call.
 @options.abortSignal is a signal that can be used to abort the tool call.
 */
-execute?: (args: inferParameters<PARAMETERS>, options:
-/**
- * Messages that were sent to the language model to initiate the response that contained the tool call.
- * The messages **do not** include the system prompt nor the assistant response that contained the tool call.
- */
-messages: CoreMessage[];
-/**
- * An optional abort signal that indicates that the overall operation should be aborted.
- */
-abortSignal?: AbortSignal;
-}) => PromiseLike<RESULT>;
+execute?: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
 } & ({
 /**
 Function tool.
@@ -1240,15 +1281,9 @@ The arguments for configuring the tool. Must match the expected arguments define
 Helper function for inferring the execute args of a tool.
 */
 declare function tool<PARAMETERS extends Parameters, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
-execute: (args: inferParameters<PARAMETERS>, options:
-messages: CoreMessage[];
-abortSignal?: AbortSignal;
-}) => PromiseLike<RESULT>;
+execute: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
 }): CoreTool<PARAMETERS, RESULT> & {
-execute: (args: inferParameters<PARAMETERS>, options:
-messages: CoreMessage[];
-abortSignal?: AbortSignal;
-}) => PromiseLike<RESULT>;
+execute: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
 };
 declare function tool<PARAMETERS extends Parameters, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
 execute?: undefined;
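
With the new `ToolExecutionOptions`, a tool's `execute` function receives `toolCallId` alongside `messages` and `abortSignal`. A hedged sketch follows; the weather tool, its schema, and the URL are invented for illustration, only the option names come from the typings above.

```ts
import { tool } from 'ai';
import { z } from 'zod';

// Hypothetical tool definition showing the ToolExecutionOptions argument.
const getWeather = tool({
  description: 'Get the current weather for a city',
  parameters: z.object({ city: z.string() }),
  execute: async ({ city }, { toolCallId, messages, abortSignal }) => {
    // toolCallId can be forwarded (e.g. as stream data) to correlate results;
    // messages holds the prompt messages that led to this tool call.
    console.log(`executing ${toolCallId} after ${messages.length} messages`);
    const res = await fetch(`https://api.example.com/weather?city=${city}`, {
      signal: abortSignal,
    });
    return res.json();
  },
});
```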
@@ -1606,6 +1641,8 @@ changing the tool call and result types in the result.
 
 /**
  * A stream wrapper to send custom JSON-encoded data back to the client.
+ *
+ * @deprecated Please use `createDataStream`, `createDataStreamResponse`, and `pipeDataStreamToResponse` instead.
  */
 declare class StreamData {
 private encoder;
@@ -1618,11 +1655,6 @@ declare class StreamData {
 append(value: JSONValue$1): void;
 appendMessageAnnotation(value: JSONValue$1): void;
 }
-/**
- * A TransformStream for LLMs that do not have their own transform stream handlers managing encoding (e.g. OpenAIStream has one for function call handling).
- * This assumes every chunk is a 'text' chunk.
- */
-declare function createStreamDataTransformer(): TransformStream<any, any>;
 
 /**
 A result object for accessing different stream types and additional information.
@@ -1720,6 +1752,12 @@ interface StreamTextResult<TOOLS extends Record<string, CoreTool>> {
 getErrorMessage?: (error: unknown) => string;
 sendUsage?: boolean;
 }): ReadableStream<Uint8Array>;
+/**
+ * Merges the result as a data stream into another data stream.
+ *
+ * @param dataStream A data stream writer.
+ */
+mergeIntoDataStream(dataStream: DataStreamWriter): void;
 /**
 Writes data stream output to a Node.js response-like object.
 
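
The new `mergeIntoDataStream` method on `StreamTextResult` pairs with the writer API from the first hunk. A minimal sketch, assuming an already configured `LanguageModel` is passed in; the function name and data payload are placeholders.

```ts
import { createDataStreamResponse, streamText, type LanguageModel } from 'ai';

// Sketch only: `model` is whatever provider model you already use with streamText.
export function chatResponse(model: LanguageModel, prompt: string): Response {
  return createDataStreamResponse({
    execute: dataStream => {
      dataStream.writeData({ stage: 'generating' });
      const result = streamText({ model, prompt });
      // Forward the LLM output into the same data stream as the custom parts.
      result.mergeIntoDataStream(dataStream);
    },
  });
}
```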
@@ -1731,7 +1769,7 @@ interface StreamTextResult<TOOLS extends Record<string, CoreTool>> {
 @param options.getErrorMessage An optional function that converts an error to an error message.
 @param options.sendUsage Whether to send the usage information to the client. Defaults to true.
 */
-pipeDataStreamToResponse(response: ServerResponse
+pipeDataStreamToResponse(response: ServerResponse, options?: ResponseInit & {
 data?: StreamData;
 getErrorMessage?: (error: unknown) => string;
 sendUsage?: boolean;
@@ -1744,7 +1782,7 @@ interface StreamTextResult<TOOLS extends Record<string, CoreTool>> {
 @param response A Node.js response-like object (ServerResponse).
 @param init Optional headers, status code, and status text.
 */
-pipeTextStreamToResponse(response: ServerResponse
+pipeTextStreamToResponse(response: ServerResponse, init?: ResponseInit): void;
 /**
 Converts the result to a streamed response object with a stream data part stream.
 It can be used with the `useChat` and `useCompletion` hooks.
@@ -2256,15 +2294,20 @@ The following streams are supported:
 - `LangChainAIMessageChunk` streams (LangChain `model.stream` output)
 - `string` streams (LangChain `StringOutputParser` output)
 */
-declare function toDataStream$1(stream: ReadableStream<LangChainStreamEvent> | ReadableStream<LangChainAIMessageChunk> | ReadableStream<string>, callbacks?: StreamCallbacks): ReadableStream<
+declare function toDataStream$1(stream: ReadableStream<LangChainStreamEvent> | ReadableStream<LangChainAIMessageChunk> | ReadableStream<string>, callbacks?: StreamCallbacks): ReadableStream<Uint8Array>;
 declare function toDataStreamResponse$1(stream: ReadableStream<LangChainStreamEvent> | ReadableStream<LangChainAIMessageChunk> | ReadableStream<string>, options?: {
 init?: ResponseInit;
 data?: StreamData;
 callbacks?: StreamCallbacks;
 }): Response;
+declare function mergeIntoDataStream$1(stream: ReadableStream<LangChainStreamEvent> | ReadableStream<LangChainAIMessageChunk> | ReadableStream<string>, options: {
+dataStream: DataStreamWriter;
+callbacks?: StreamCallbacks;
+}): void;
 
 declare namespace langchainAdapter {
 export {
+mergeIntoDataStream$1 as mergeIntoDataStream,
 toDataStream$1 as toDataStream,
 toDataStreamResponse$1 as toDataStreamResponse,
 };
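
The adapters gain a matching `mergeIntoDataStream` helper. A hedged sketch for `LangChainAdapter`, assuming the incoming stream comes from a LangChain pipeline; the function name and data payload are illustrative.

```ts
import { createDataStreamResponse, LangChainAdapter } from 'ai';

// Sketch only: `langchainStream` would be LangChain output, e.g. a
// ReadableStream<string> produced by a StringOutputParser pipeline.
export function toResponse(langchainStream: ReadableStream<string>): Response {
  return createDataStreamResponse({
    execute: dataStream => {
      dataStream.writeData({ adapter: 'langchain' });
      // Merge the LangChain output into the data stream next to the custom part.
      LangChainAdapter.mergeIntoDataStream(langchainStream, { dataStream });
    },
  });
}
```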
@@ -2273,20 +2316,26 @@ declare namespace langchainAdapter {
 type EngineResponse = {
 delta: string;
 };
-declare function toDataStream(stream: AsyncIterable<EngineResponse>, callbacks?: StreamCallbacks): ReadableStream<
+declare function toDataStream(stream: AsyncIterable<EngineResponse>, callbacks?: StreamCallbacks): ReadableStream<Uint8Array>;
 declare function toDataStreamResponse(stream: AsyncIterable<EngineResponse>, options?: {
 init?: ResponseInit;
 data?: StreamData;
 callbacks?: StreamCallbacks;
 }): Response;
+declare function mergeIntoDataStream(stream: AsyncIterable<EngineResponse>, options: {
+dataStream: DataStreamWriter;
+callbacks?: StreamCallbacks;
+}): void;
 
+declare const llamaindexAdapter_mergeIntoDataStream: typeof mergeIntoDataStream;
 declare const llamaindexAdapter_toDataStream: typeof toDataStream;
 declare const llamaindexAdapter_toDataStreamResponse: typeof toDataStreamResponse;
 declare namespace llamaindexAdapter {
 export {
+llamaindexAdapter_mergeIntoDataStream as mergeIntoDataStream,
 llamaindexAdapter_toDataStream as toDataStream,
 llamaindexAdapter_toDataStreamResponse as toDataStreamResponse,
 };
 }
 
-export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolContent, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity,
+export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolContent, ToolExecutionOptions, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createDataStream, createDataStreamResponse, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, experimental_wrapLanguageModel, generateObject, generateText, pipeDataStreamToResponse, streamObject, streamText, tool };
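
`LlamaIndexAdapter.mergeIntoDataStream` mirrors the LangChain helper for LlamaIndex engine streams. A sketch under the assumption that `engineStream` is the async iterable returned by a LlamaIndex chat or query engine; the function name is a placeholder.

```ts
import { createDataStreamResponse, LlamaIndexAdapter } from 'ai';

// Sketch only: the { delta: string } shape matches the EngineResponse typing above.
export function llamaIndexResponse(
  engineStream: AsyncIterable<{ delta: string }>,
): Response {
  return createDataStreamResponse({
    execute: dataStream => {
      // Merge the LlamaIndex engine output into the data stream.
      LlamaIndexAdapter.mergeIntoDataStream(engineStream, { dataStream });
    },
  });
}
```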
package/dist/index.d.ts
CHANGED
@@ -1,12 +1,48 @@
-import { ToolInvocation, Attachment, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
+import { DataStreamString, ToolInvocation, Attachment, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
 export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UseAssistantOptions, formatAssistantStreamPart, formatDataStreamPart, jsonSchema, parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream } from '@ai-sdk/ui-utils';
 export { ToolCall as CoreToolCall, ToolResult as CoreToolResult, generateId } from '@ai-sdk/provider-utils';
-import {
-import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, JSONValue, LanguageModelV1CallOptions, NoSuchModelError, AISDKError } from '@ai-sdk/provider';
+import { JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, LanguageModelV1CallOptions, NoSuchModelError, AISDKError } from '@ai-sdk/provider';
 export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LanguageModelV1, LanguageModelV1CallOptions, LanguageModelV1Prompt, LanguageModelV1StreamPart, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
+import { ServerResponse } from 'node:http';
+import { AttributeValue, Tracer } from '@opentelemetry/api';
 import { z } from 'zod';
-import { ServerResponse } from 'http';
-
+import { ServerResponse as ServerResponse$1 } from 'http';
+
+interface DataStreamWriter {
+/**
+ * Appends a data part to the stream.
+ */
+writeData(value: JSONValue): void;
+/**
+ * Appends a message annotation to the stream.
+ */
+writeMessageAnnotation(value: JSONValue): void;
+/**
+ * Merges the contents of another stream to this stream.
+ */
+merge(stream: ReadableStream<DataStreamString>): void;
+/**
+ * Error handler that is used by the data stream writer.
+ * This is intended for forwarding when merging streams
+ * to prevent duplicated error masking.
+ */
+onError: ((error: unknown) => string) | undefined;
+}
+
+declare function createDataStream({ execute, onError, }: {
+execute: (dataStream: DataStreamWriter) => Promise<void> | void;
+onError?: (error: unknown) => string;
+}): ReadableStream<DataStreamString>;
+
+declare function createDataStreamResponse({ status, statusText, headers, execute, onError, }: ResponseInit & {
+execute: (dataStream: DataStreamWriter) => Promise<void> | void;
+onError?: (error: unknown) => string;
+}): Response;
+
+declare function pipeDataStreamToResponse(response: ServerResponse, { status, statusText, headers, execute, onError, }: ResponseInit & {
+execute: (writer: DataStreamWriter) => Promise<void> | void;
+onError?: (error: unknown) => string;
+}): void;
 
 /**
  * Telemetry configuration.
@@ -943,7 +979,7 @@ interface StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM> {
 @param response A Node.js response-like object (ServerResponse).
 @param init Optional headers, status code, and status text.
 */
-pipeTextStreamToResponse(response: ServerResponse, init?: ResponseInit): void;
+pipeTextStreamToResponse(response: ServerResponse$1, init?: ResponseInit): void;
 /**
 Creates a simple text stream response.
 The response has a `Content-Type` header set to `text/plain; charset=utf-8`.
@@ -1178,6 +1214,21 @@ Callback that is called when the LLM response and the final object validation ar
 
 type Parameters = z.ZodTypeAny | Schema<any>;
 type inferParameters<PARAMETERS extends Parameters> = PARAMETERS extends Schema<any> ? PARAMETERS['_type'] : PARAMETERS extends z.ZodTypeAny ? z.infer<PARAMETERS> : never;
+interface ToolExecutionOptions {
+/**
+ * The ID of the tool call. You can use it e.g. when sending tool-call related information with stream data.
+ */
+toolCallId: string;
+/**
+ * Messages that were sent to the language model to initiate the response that contained the tool call.
+ * The messages **do not** include the system prompt nor the assistant response that contained the tool call.
+ */
+messages: CoreMessage[];
+/**
+ * An optional abort signal that indicates that the overall operation should be aborted.
+ */
+abortSignal?: AbortSignal;
+}
 /**
 A tool contains the description and the schema of the input that the tool expects.
 This enables the language model to generate the input.
@@ -1202,17 +1253,7 @@ type CoreTool<PARAMETERS extends Parameters = any, RESULT = any> = {
 @args is the input of the tool call.
 @options.abortSignal is a signal that can be used to abort the tool call.
 */
-execute?: (args: inferParameters<PARAMETERS>, options:
-/**
- * Messages that were sent to the language model to initiate the response that contained the tool call.
- * The messages **do not** include the system prompt nor the assistant response that contained the tool call.
- */
-messages: CoreMessage[];
-/**
- * An optional abort signal that indicates that the overall operation should be aborted.
- */
-abortSignal?: AbortSignal;
-}) => PromiseLike<RESULT>;
+execute?: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
 } & ({
 /**
 Function tool.
@@ -1240,15 +1281,9 @@ The arguments for configuring the tool. Must match the expected arguments define
 Helper function for inferring the execute args of a tool.
 */
 declare function tool<PARAMETERS extends Parameters, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
-execute: (args: inferParameters<PARAMETERS>, options:
-messages: CoreMessage[];
-abortSignal?: AbortSignal;
-}) => PromiseLike<RESULT>;
+execute: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
 }): CoreTool<PARAMETERS, RESULT> & {
-execute: (args: inferParameters<PARAMETERS>, options:
-messages: CoreMessage[];
-abortSignal?: AbortSignal;
-}) => PromiseLike<RESULT>;
+execute: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
 };
 declare function tool<PARAMETERS extends Parameters, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
 execute?: undefined;
@@ -1606,6 +1641,8 @@ changing the tool call and result types in the result.
 
 /**
  * A stream wrapper to send custom JSON-encoded data back to the client.
+ *
+ * @deprecated Please use `createDataStream`, `createDataStreamResponse`, and `pipeDataStreamToResponse` instead.
  */
 declare class StreamData {
 private encoder;
@@ -1618,11 +1655,6 @@ declare class StreamData {
 append(value: JSONValue$1): void;
 appendMessageAnnotation(value: JSONValue$1): void;
 }
-/**
- * A TransformStream for LLMs that do not have their own transform stream handlers managing encoding (e.g. OpenAIStream has one for function call handling).
- * This assumes every chunk is a 'text' chunk.
- */
-declare function createStreamDataTransformer(): TransformStream<any, any>;
 
 /**
 A result object for accessing different stream types and additional information.
@@ -1720,6 +1752,12 @@ interface StreamTextResult<TOOLS extends Record<string, CoreTool>> {
 getErrorMessage?: (error: unknown) => string;
 sendUsage?: boolean;
 }): ReadableStream<Uint8Array>;
+/**
+ * Merges the result as a data stream into another data stream.
+ *
+ * @param dataStream A data stream writer.
+ */
+mergeIntoDataStream(dataStream: DataStreamWriter): void;
 /**
 Writes data stream output to a Node.js response-like object.
 
@@ -1731,7 +1769,7 @@ interface StreamTextResult<TOOLS extends Record<string, CoreTool>> {
 @param options.getErrorMessage An optional function that converts an error to an error message.
 @param options.sendUsage Whether to send the usage information to the client. Defaults to true.
 */
-pipeDataStreamToResponse(response: ServerResponse
+pipeDataStreamToResponse(response: ServerResponse, options?: ResponseInit & {
 data?: StreamData;
 getErrorMessage?: (error: unknown) => string;
 sendUsage?: boolean;
@@ -1744,7 +1782,7 @@ interface StreamTextResult<TOOLS extends Record<string, CoreTool>> {
 @param response A Node.js response-like object (ServerResponse).
 @param init Optional headers, status code, and status text.
 */
-pipeTextStreamToResponse(response: ServerResponse
+pipeTextStreamToResponse(response: ServerResponse, init?: ResponseInit): void;
 /**
 Converts the result to a streamed response object with a stream data part stream.
 It can be used with the `useChat` and `useCompletion` hooks.
@@ -2256,15 +2294,20 @@ The following streams are supported:
 - `LangChainAIMessageChunk` streams (LangChain `model.stream` output)
 - `string` streams (LangChain `StringOutputParser` output)
 */
-declare function toDataStream$1(stream: ReadableStream<LangChainStreamEvent> | ReadableStream<LangChainAIMessageChunk> | ReadableStream<string>, callbacks?: StreamCallbacks): ReadableStream<
+declare function toDataStream$1(stream: ReadableStream<LangChainStreamEvent> | ReadableStream<LangChainAIMessageChunk> | ReadableStream<string>, callbacks?: StreamCallbacks): ReadableStream<Uint8Array>;
 declare function toDataStreamResponse$1(stream: ReadableStream<LangChainStreamEvent> | ReadableStream<LangChainAIMessageChunk> | ReadableStream<string>, options?: {
 init?: ResponseInit;
 data?: StreamData;
 callbacks?: StreamCallbacks;
 }): Response;
+declare function mergeIntoDataStream$1(stream: ReadableStream<LangChainStreamEvent> | ReadableStream<LangChainAIMessageChunk> | ReadableStream<string>, options: {
+dataStream: DataStreamWriter;
+callbacks?: StreamCallbacks;
+}): void;
 
 declare namespace langchainAdapter {
 export {
+mergeIntoDataStream$1 as mergeIntoDataStream,
 toDataStream$1 as toDataStream,
 toDataStreamResponse$1 as toDataStreamResponse,
 };
@@ -2273,20 +2316,26 @@ declare namespace langchainAdapter {
 type EngineResponse = {
 delta: string;
 };
-declare function toDataStream(stream: AsyncIterable<EngineResponse>, callbacks?: StreamCallbacks): ReadableStream<
+declare function toDataStream(stream: AsyncIterable<EngineResponse>, callbacks?: StreamCallbacks): ReadableStream<Uint8Array>;
 declare function toDataStreamResponse(stream: AsyncIterable<EngineResponse>, options?: {
 init?: ResponseInit;
 data?: StreamData;
 callbacks?: StreamCallbacks;
 }): Response;
+declare function mergeIntoDataStream(stream: AsyncIterable<EngineResponse>, options: {
+dataStream: DataStreamWriter;
+callbacks?: StreamCallbacks;
+}): void;
 
+declare const llamaindexAdapter_mergeIntoDataStream: typeof mergeIntoDataStream;
 declare const llamaindexAdapter_toDataStream: typeof toDataStream;
 declare const llamaindexAdapter_toDataStreamResponse: typeof toDataStreamResponse;
 declare namespace llamaindexAdapter {
 export {
+llamaindexAdapter_mergeIntoDataStream as mergeIntoDataStream,
 llamaindexAdapter_toDataStream as toDataStream,
 llamaindexAdapter_toDataStreamResponse as toDataStreamResponse,
 };
 }
 
-export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolContent, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity,
+export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolContent, ToolExecutionOptions, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createDataStream, createDataStreamResponse, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, experimental_wrapLanguageModel, generateObject, generateText, pipeDataStreamToResponse, streamObject, streamText, tool };