ai 3.0.3 → 3.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,998 @@
+ import { ChatCompletionResponseChunk } from '@mistralai/mistralai';
+ import { ServerResponse } from 'node:http';
+
+ interface FunctionCall {
+   /**
+    * The arguments to call the function with, as generated by the model in JSON
+    * format. Note that the model does not always generate valid JSON, and may
+    * hallucinate parameters not defined by your function schema. Validate the
+    * arguments in your code before calling your function.
+    */
+   arguments?: string;
+   /**
+    * The name of the function to call.
+    */
+   name?: string;
+ }
+ /**
+  * The tool calls generated by the model, such as function calls.
+  */
+ interface ToolCall {
+   id: string;
+   type: string;
+   function: {
+     name: string;
+     arguments: string;
+   };
+ }
+ /**
+  * Controls which (if any) function is called by the model.
+  * - none means the model will not call a function and instead generates a message.
+  * - auto means the model can pick between generating a message or calling a function.
+  * - Specifying a particular function via {"type": "function", "function": {"name": "my_function"}} forces the model to call that function.
+  * none is the default when no functions are present. auto is the default if functions are present.
+  */
+ type ToolChoice = 'none' | 'auto' | {
+   type: 'function';
+   function: {
+     name: string;
+   };
+ };
+ /**
+  * A list of tools the model may call. Currently, only functions are supported as a tool.
+  * Use this to provide a list of functions the model may generate JSON inputs for.
+  */
+ interface Tool {
+   type: 'function';
+   function: Function;
+ }
+ interface Function {
+   /**
+    * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
+    * underscores and dashes, with a maximum length of 64.
+    */
+   name: string;
+   /**
+    * The parameters the function accepts, described as a JSON Schema object. See the
+    * [guide](/docs/guides/gpt/function-calling) for examples, and the
+    * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+    * documentation about the format.
+    *
+    * To describe a function that accepts no parameters, provide the value
+    * `{"type": "object", "properties": {}}`.
+    */
+   parameters: Record<string, unknown>;
+   /**
+    * A description of what the function does, used by the model to choose when and
+    * how to call the function.
+    */
+   description?: string;
+ }
+ type IdGenerator = () => string;
+ /**
+  * Shared types between the API and UI packages.
+  */
+ interface Message$1 {
+   id: string;
+   tool_call_id?: string;
+   createdAt?: Date;
+   content: string;
+   ui?: string | JSX.Element | JSX.Element[] | null | undefined;
+   role: 'system' | 'user' | 'assistant' | 'function' | 'data' | 'tool';
+   /**
+    * If the message has a role of `function`, the `name` field is the name of the function.
+    * Otherwise, the name field should not be set.
+    */
+   name?: string;
+   /**
+    * If the assistant role makes a function call, the `function_call` field
+    * contains the function call name and arguments. Otherwise, the field should
+    * not be set. (Deprecated and replaced by tool_calls.)
+    */
+   function_call?: string | FunctionCall;
+   data?: JSONValue;
+   /**
+    * If the assistant role makes a tool call, the `tool_calls` field contains
+    * the tool call name and arguments. Otherwise, the field should not be set.
+    */
+   tool_calls?: string | ToolCall[];
+   /**
+    * Additional message-specific information added on the server via StreamData
+    */
+   annotations?: JSONValue[] | undefined;
+ }
+ type CreateMessage = Omit<Message$1, 'id'> & {
+   id?: Message$1['id'];
+ };
+ type ChatRequest = {
+   messages: Message$1[];
+   options?: RequestOptions;
+   functions?: Array<Function>;
+   function_call?: FunctionCall;
+   data?: Record<string, string>;
+   tools?: Array<Tool>;
+   tool_choice?: ToolChoice;
+ };
+ type FunctionCallHandler = (chatMessages: Message$1[], functionCall: FunctionCall) => Promise<ChatRequest | void>;
+ type ToolCallHandler = (chatMessages: Message$1[], toolCalls: ToolCall[]) => Promise<ChatRequest | void>;
+ type RequestOptions = {
+   headers?: Record<string, string> | Headers;
+   body?: object;
+ };
+ type ChatRequestOptions = {
+   options?: RequestOptions;
+   functions?: Array<Function>;
+   function_call?: FunctionCall;
+   tools?: Array<Tool>;
+   tool_choice?: ToolChoice;
+   data?: Record<string, string>;
+ };
+ type UseChatOptions = {
+   /**
+    * The API endpoint that accepts a `{ messages: Message[] }` object and returns
+    * a stream of tokens of the AI chat response. Defaults to `/api/chat`.
+    */
+   api?: string;
+   /**
+    * A unique identifier for the chat. If not provided, a random one will be
+    * generated. When provided, the `useChat` hook with the same `id` will
+    * have shared states across components.
+    */
+   id?: string;
+   /**
+    * Initial messages of the chat. Useful to load an existing chat history.
+    */
+   initialMessages?: Message$1[];
+   /**
+    * Initial input of the chat.
+    */
+   initialInput?: string;
+   /**
+    * Callback function to be called when a function call is received.
+    * If the function returns a `ChatRequest` object, the request will be sent
+    * automatically to the API and will be used to update the chat.
+    */
+   experimental_onFunctionCall?: FunctionCallHandler;
+   /**
+    * Callback function to be called when a tool call is received.
+    * If the function returns a `ChatRequest` object, the request will be sent
+    * automatically to the API and will be used to update the chat.
+    */
+   experimental_onToolCall?: ToolCallHandler;
+   /**
+    * Callback function to be called when the API response is received.
+    */
+   onResponse?: (response: Response) => void | Promise<void>;
+   /**
+    * Callback function to be called when the chat is finished streaming.
+    */
+   onFinish?: (message: Message$1) => void;
+   /**
+    * Callback function to be called when an error is encountered.
+    */
+   onError?: (error: Error) => void;
+   /**
+    * A way to provide a function that is going to be used for ids for messages.
+    * If not provided, nanoid is used by default.
+    */
+   generateId?: IdGenerator;
+   /**
+    * The credentials mode to be used for the fetch request.
+    * Possible values are: 'omit', 'same-origin', 'include'.
+    * Defaults to 'same-origin'.
+    */
+   credentials?: RequestCredentials;
+   /**
+    * HTTP headers to be sent with the API request.
+    */
+   headers?: Record<string, string> | Headers;
+   /**
+    * Extra body object to be sent with the API request.
+    * @example
+    * Send a `sessionId` to the API along with the messages.
+    * ```js
+    * useChat({
+    *   body: {
+    *     sessionId: '123',
+    *   }
+    * })
+    * ```
+    */
+   body?: object;
+   /**
+    * Whether to send extra message fields such as `message.id` and `message.createdAt` to the API.
+    * Defaults to `false`. When set to `true`, the API endpoint might need to
+    * handle the extra fields before forwarding the request to the AI service.
+    */
+   sendExtraMessageFields?: boolean;
+ };
+ type UseCompletionOptions = {
+   /**
+    * The API endpoint that accepts a `{ prompt: string }` object and returns
+    * a stream of tokens of the AI completion response. Defaults to `/api/completion`.
+    */
+   api?: string;
+   /**
+    * A unique identifier for the completion. If not provided, a random one will be
+    * generated. When provided, the `useCompletion` hook with the same `id` will
+    * have shared states across components.
+    */
+   id?: string;
+   /**
+    * Initial prompt input of the completion.
+    */
+   initialInput?: string;
+   /**
+    * Initial completion result. Useful to load an existing history.
+    */
+   initialCompletion?: string;
+   /**
+    * Callback function to be called when the API response is received.
+    */
+   onResponse?: (response: Response) => void | Promise<void>;
+   /**
+    * Callback function to be called when the completion is finished streaming.
+    */
+   onFinish?: (prompt: string, completion: string) => void;
+   /**
+    * Callback function to be called when an error is encountered.
+    */
+   onError?: (error: Error) => void;
+   /**
+    * The credentials mode to be used for the fetch request.
+    * Possible values are: 'omit', 'same-origin', 'include'.
+    * Defaults to 'same-origin'.
+    */
+   credentials?: RequestCredentials;
+   /**
+    * HTTP headers to be sent with the API request.
+    */
+   headers?: Record<string, string> | Headers;
+   /**
+    * Extra body object to be sent with the API request.
+    * @example
+    * Send a `sessionId` to the API along with the prompt.
+    * ```js
+    * useCompletion({
+    *   body: {
+    *     sessionId: '123',
+    *   }
+    * })
+    * ```
+    */
+   body?: object;
+ };
+ type JSONValue = null | string | number | boolean | {
+   [x: string]: JSONValue;
+ } | Array<JSONValue>;
+ type AssistantMessage = {
+   id: string;
+   role: 'assistant';
+   content: Array<{
+     type: 'text';
+     text: {
+       value: string;
+     };
+   }>;
+ };
+ type DataMessage = {
+   id?: string;
+   role: 'data';
+   data: JSONValue;
+ };
+
+ interface StreamPart<CODE extends string, NAME extends string, TYPE> {
+   code: CODE;
+   name: NAME;
+   parse: (value: JSONValue) => {
+     type: NAME;
+     value: TYPE;
+   };
+ }
+ declare const textStreamPart: StreamPart<'0', 'text', string>;
+ declare const functionCallStreamPart: StreamPart<'1', 'function_call', {
+   function_call: FunctionCall;
+ }>;
+ declare const dataStreamPart: StreamPart<'2', 'data', Array<JSONValue>>;
+ declare const errorStreamPart: StreamPart<'3', 'error', string>;
+ declare const assistantMessageStreamPart: StreamPart<'4', 'assistant_message', AssistantMessage>;
+ declare const assistantControlDataStreamPart: StreamPart<'5', 'assistant_control_data', {
+   threadId: string;
+   messageId: string;
+ }>;
+ declare const dataMessageStreamPart: StreamPart<'6', 'data_message', DataMessage>;
+ declare const toolCallStreamPart: StreamPart<'7', 'tool_calls', {
+   tool_calls: ToolCall[];
+ }>;
+ declare const messageAnnotationsStreamPart: StreamPart<'8', 'message_annotations', Array<JSONValue>>;
+ type StreamPartType = ReturnType<typeof textStreamPart.parse> | ReturnType<typeof functionCallStreamPart.parse> | ReturnType<typeof dataStreamPart.parse> | ReturnType<typeof errorStreamPart.parse> | ReturnType<typeof assistantMessageStreamPart.parse> | ReturnType<typeof assistantControlDataStreamPart.parse> | ReturnType<typeof dataMessageStreamPart.parse> | ReturnType<typeof toolCallStreamPart.parse> | ReturnType<typeof messageAnnotationsStreamPart.parse>;
+ /**
+  * The map of prefixes for data in the stream
+  *
+  * - 0: Text from the LLM response
+  * - 1: (OpenAI) function_call responses
+  * - 2: custom JSON added by the user using `Data`
+  * - 7: (OpenAI) tool_calls responses
+  *
+  * Example:
+  * ```
+  * 0:Vercel
+  * 0:'s
+  * 0: AI
+  * 0: SDK
+  * 0: is great
+  * 0:!
+  * 2: { "someJson": "value" }
+  * 1: {"function_call": {"name": "get_current_weather", "arguments": "{\\n\\"location\\": \\"Charlottesville, Virginia\\",\\n\\"format\\": \\"celsius\\"\\n}"}}
+  * 7: {"tool_calls": [{"id": "tool_0", "type": "function", "function": {"name": "get_current_weather", "arguments": "{\\n\\"location\\": \\"Charlottesville, Virginia\\",\\n\\"format\\": \\"celsius\\"\\n}"}}]}
+  * ```
+  */
+ declare const StreamStringPrefixes: {
+   readonly text: "0";
+   readonly function_call: "1";
+   readonly data: "2";
+   readonly error: "3";
+   readonly assistant_message: "4";
+   readonly assistant_control_data: "5";
+   readonly data_message: "6";
+   readonly tool_calls: "7";
+   readonly message_annotations: "8";
+ };
+
+ declare const nanoid: (size?: number | undefined) => string;
+ declare function createChunkDecoder(): (chunk: Uint8Array | undefined) => string;
+ declare function createChunkDecoder(complex: false): (chunk: Uint8Array | undefined) => string;
+ declare function createChunkDecoder(complex: true): (chunk: Uint8Array | undefined) => StreamPartType[];
+ declare function createChunkDecoder(complex?: boolean): (chunk: Uint8Array | undefined) => StreamPartType[] | string;
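+
+ // Usage sketch (illustrative, not part of the declarations): decoding a fetch
+ // response body chunk by chunk with the simple (non-complex) decoder.
+ //
+ //   const decode = createChunkDecoder();
+ //   const reader = response.body!.getReader();
+ //   let text = '';
+ //   while (true) {
+ //     const { done, value } = await reader.read();
+ //     if (done) break;
+ //     text += decode(value); // each Uint8Array chunk is appended as plain text
+ //   }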
+
+ declare const isStreamStringEqualToType: (type: keyof typeof StreamStringPrefixes, value: string) => value is `0:${string}\n` | `1:${string}\n` | `2:${string}\n` | `3:${string}\n` | `4:${string}\n` | `5:${string}\n` | `6:${string}\n` | `7:${string}\n` | `8:${string}\n`;
+ type StreamString = `${(typeof StreamStringPrefixes)[keyof typeof StreamStringPrefixes]}:${string}\n`;
+ /**
+  * A header sent to the client so it knows how to handle parsing the stream (as a deprecated text response or using the new prefixed protocol)
+  */
+ declare const COMPLEX_HEADER = "X-Experimental-Stream-Data";
+
+ declare interface AzureChatCompletions {
+   id: string;
+   created: Date;
+   choices: AzureChatChoice[];
+   systemFingerprint?: string;
+   usage?: AzureCompletionsUsage;
+   promptFilterResults: any[];
+ }
+ declare interface AzureChatChoice {
+   message?: AzureChatResponseMessage;
+   index: number;
+   finishReason: string | null;
+   delta?: AzureChatResponseMessage;
+ }
+ declare interface AzureChatResponseMessage {
+   role: string;
+   content: string | null;
+   toolCalls: AzureChatCompletionsFunctionToolCall[];
+   functionCall?: AzureFunctionCall;
+ }
+ declare interface AzureCompletionsUsage {
+   completionTokens: number;
+   promptTokens: number;
+   totalTokens: number;
+ }
+ declare interface AzureFunctionCall {
+   name: string;
+   arguments: string;
+ }
+ declare interface AzureChatCompletionsFunctionToolCall {
+   type: 'function';
+   function: AzureFunctionCall;
+   id: string;
+ }
+
+ type OpenAIStreamCallbacks = AIStreamCallbacksAndOptions & {
+   /**
+    * @example
+    * ```js
+    * const response = await openai.chat.completions.create({
+    *   model: 'gpt-3.5-turbo-0613',
+    *   stream: true,
+    *   messages,
+    *   functions,
+    * })
+    *
+    * const stream = OpenAIStream(response, {
+    *   experimental_onFunctionCall: async (functionCallPayload, createFunctionCallMessages) => {
+    *     // ... run your custom logic here
+    *     const result = await myFunction(functionCallPayload)
+    *
+    *     // Ask for another completion, or return a string to send to the client as an assistant message.
+    *     return await openai.chat.completions.create({
+    *       model: 'gpt-3.5-turbo-0613',
+    *       stream: true,
+    *       // Append the relevant "assistant" and "function" call messages
+    *       messages: [...messages, ...createFunctionCallMessages(result)],
+    *       functions,
+    *     })
+    *   }
+    * })
+    * ```
+    */
+   experimental_onFunctionCall?: (functionCallPayload: FunctionCallPayload, createFunctionCallMessages: (functionCallResult: JSONValue) => CreateMessage[]) => Promise<Response | undefined | void | string | AsyncIterableOpenAIStreamReturnTypes>;
+   /**
+    * @example
+    * ```js
+    * const response = await openai.chat.completions.create({
+    *   model: 'gpt-3.5-turbo-1106', // or gpt-4-1106-preview
+    *   stream: true,
+    *   messages,
+    *   tools,
+    *   tool_choice: "auto", // auto is default, but we'll be explicit
+    * })
+    *
+    * const stream = OpenAIStream(response, {
+    *   experimental_onToolCall: async (toolCallPayload, appendToolCallMessage) => {
+    *     let messages: CreateMessage[] = []
+    *     // There might be multiple tool calls, so we need to iterate through them
+    *     for (const tool of toolCallPayload.tools) {
+    *       // ... run your custom logic here
+    *       const result = await myFunction(tool.function)
+    *       // Append the relevant "assistant" and "tool" call messages
+    *       appendToolCallMessage({ tool_call_id: tool.id, function_name: tool.function.name, tool_call_result: result })
+    *     }
+    *     // Ask for another completion, or return a string to send to the client as an assistant message.
+    *     return await openai.chat.completions.create({
+    *       model: 'gpt-3.5-turbo-1106', // or gpt-4-1106-preview
+    *       stream: true,
+    *       // Append the result messages; calling appendToolCallMessage without
+    *       // any arguments will just return the accumulated messages
+    *       messages: [...messages, ...appendToolCallMessage()],
+    *       tools,
+    *       tool_choice: "auto", // auto is default, but we'll be explicit
+    *     })
+    *   }
+    * })
+    * ```
+    */
+   experimental_onToolCall?: (toolCallPayload: ToolCallPayload, appendToolCallMessage: (result?: {
+     tool_call_id: string;
+     function_name: string;
+     tool_call_result: JSONValue;
+   }) => CreateMessage[]) => Promise<Response | undefined | void | string | AsyncIterableOpenAIStreamReturnTypes>;
+ };
+ interface ChatCompletionChunk {
+   id: string;
+   choices: Array<ChatCompletionChunkChoice>;
+   created: number;
+   model: string;
+   object: string;
+ }
+ interface ChatCompletionChunkChoice {
+   delta: ChoiceDelta;
+   finish_reason: 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call' | null;
+   index: number;
+ }
+ interface ChoiceDelta {
+   /**
+    * The contents of the chunk message.
+    */
+   content?: string | null;
+   /**
+    * The name and arguments of a function that should be called, as generated by the
+    * model.
+    */
+   function_call?: FunctionCall;
+   /**
+    * The role of the author of this message.
+    */
+   role?: 'system' | 'user' | 'assistant' | 'tool';
+   tool_calls?: Array<DeltaToolCall>;
+ }
+ interface DeltaToolCall {
+   index: number;
+   /**
+    * The ID of the tool call.
+    */
+   id?: string;
+   /**
+    * The function that the model called.
+    */
+   function?: ToolCallFunction;
+   /**
+    * The type of the tool. Currently, only `function` is supported.
+    */
+   type?: 'function';
+ }
+ interface ToolCallFunction {
+   /**
+    * The arguments to call the function with, as generated by the model in JSON
+    * format. Note that the model does not always generate valid JSON, and may
+    * hallucinate parameters not defined by your function schema. Validate the
+    * arguments in your code before calling your function.
+    */
+   arguments?: string;
+   /**
+    * The name of the function to call.
+    */
+   name?: string;
+ }
+ /**
+  * https://github.com/openai/openai-node/blob/3ec43ee790a2eb6a0ccdd5f25faa23251b0f9b8e/src/resources/completions.ts#L28C1-L64C1
+  * Completions API. Streamed and non-streamed responses are the same.
+  */
+ interface Completion {
+   /**
+    * A unique identifier for the completion.
+    */
+   id: string;
+   /**
+    * The list of completion choices the model generated for the input prompt.
+    */
+   choices: Array<CompletionChoice>;
+   /**
+    * The Unix timestamp of when the completion was created.
+    */
+   created: number;
+   /**
+    * The model used for completion.
+    */
+   model: string;
+   /**
+    * The object type, which is always "text_completion"
+    */
+   object: string;
+   /**
+    * Usage statistics for the completion request.
+    */
+   usage?: CompletionUsage;
+ }
+ interface CompletionChoice {
+   /**
+    * The reason the model stopped generating tokens. This will be `stop` if the model
+    * hit a natural stop point or a provided stop sequence, or `length` if the maximum
+    * number of tokens specified in the request was reached.
+    */
+   finish_reason: 'stop' | 'length' | 'content_filter';
+   index: number;
+   logprobs: any | null;
+   text: string;
+ }
+ /**
+  * Usage statistics for the completion request.
+  */
+ interface CompletionUsage {
+   /**
+    * Number of tokens in the generated completion.
+    */
+   completion_tokens: number;
+   /**
+    * Number of tokens in the prompt.
+    */
+   prompt_tokens: number;
+   /**
+    * Total number of tokens used in the request (prompt + completion).
+    */
+   total_tokens: number;
+ }
+ type AsyncIterableOpenAIStreamReturnTypes = AsyncIterable<ChatCompletionChunk> | AsyncIterable<Completion> | AsyncIterable<AzureChatCompletions>;
+ declare function OpenAIStream(res: Response | AsyncIterableOpenAIStreamReturnTypes, callbacks?: OpenAIStreamCallbacks): ReadableStream;
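+
+ // Minimal route-handler sketch (illustrative; assumes the official `openai`
+ // client and an edge/serverless handler, neither of which is part of these
+ // declarations):
+ //
+ //   import OpenAI from 'openai';
+ //   const openai = new OpenAI();
+ //   export async function POST(req: Request) {
+ //     const { messages } = await req.json();
+ //     const response = await openai.chat.completions.create({
+ //       model: 'gpt-3.5-turbo',
+ //       stream: true,
+ //       messages,
+ //     });
+ //     // Wrap the streamed completion and send it to the client.
+ //     return new StreamingTextResponse(OpenAIStream(response));
+ //   }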
+
+ interface FunctionCallPayload {
+   name: string;
+   arguments: Record<string, unknown>;
+ }
+ interface ToolCallPayload {
+   tools: {
+     id: string;
+     type: 'function';
+     func: {
+       name: string;
+       arguments: Record<string, unknown>;
+     };
+   }[];
+ }
+ /**
+  * Configuration options and helper callback methods for AIStream stream lifecycle events.
+  * @interface
+  */
+ interface AIStreamCallbacksAndOptions {
+   /** `onStart`: Called once when the stream is initialized. */
+   onStart?: () => Promise<void> | void;
+   /** `onCompletion`: Called when a completion message finishes streaming. May be called multiple times (e.g. when using OpenAI functions). */
+   onCompletion?: (completion: string) => Promise<void> | void;
+   /** `onFinal`: Called once when the stream is closed with the final completion message. */
+   onFinal?: (completion: string) => Promise<void> | void;
+   /** `onToken`: Called for each tokenized message. */
+   onToken?: (token: string) => Promise<void> | void;
+   /** `onText`: Called for each text chunk. */
+   onText?: (text: string) => Promise<void> | void;
+   /**
+    * A flag for enabling the experimental_StreamData class and the new protocol.
+    * @see https://github.com/vercel-labs/ai/pull/425
+    *
+    * When StreamData is rolled out, this will be removed and the new protocol will be used by default.
+    */
+   experimental_streamData?: boolean;
+ }
+ /**
+  * Options for the AIStreamParser.
+  * @interface
+  * @property {string} event - The event (type) from the server side event stream.
+  */
+ interface AIStreamParserOptions {
+   event?: string;
+ }
+ /**
+  * Custom parser for AIStream data.
+  * @interface
+  * @param {string} data - The data to be parsed.
+  * @param {AIStreamParserOptions} options - The options for the parser.
+  * @returns {string | void} The parsed data or void.
+  */
+ interface AIStreamParser {
+   (data: string, options: AIStreamParserOptions): string | void | {
+     isText: false;
+     content: string;
+   };
+ }
+ /**
+  * Creates a TransformStream that parses events from an EventSource stream using a custom parser.
+  * @param {AIStreamParser} customParser - Function to handle event data.
+  * @returns {TransformStream<Uint8Array, string>} TransformStream parsing events.
+  */
+ declare function createEventStreamTransformer(customParser?: AIStreamParser): TransformStream<Uint8Array, string | {
+   isText: false;
+   content: string;
+ }>;
+ /**
+  * Creates a transform stream that encodes input messages and invokes optional callback functions.
+  * The transform stream uses the provided callbacks to execute custom logic at different stages of the stream's lifecycle.
+  * - `onStart`: Called once when the stream is initialized.
+  * - `onToken`: Called for each tokenized message.
+  * - `onCompletion`: Called every time an AIStream completion message is received. This can occur multiple times when using e.g. OpenAI functions.
+  * - `onFinal`: Called once when the stream is closed with the final completion message.
+  *
+  * This function is useful when you want to process a stream of messages and perform specific actions during the stream's lifecycle.
+  *
+  * @param {AIStreamCallbacksAndOptions} [callbacks] - An object containing the callback functions.
+  * @return {TransformStream<string, Uint8Array>} A transform stream that encodes input messages as Uint8Array and allows the execution of custom logic through callbacks.
+  *
+  * @example
+  * const callbacks = {
+  *   onStart: async () => console.log('Stream started'),
+  *   onToken: async (token) => console.log(`Token: ${token}`),
+  *   onCompletion: async (completion) => console.log(`Completion: ${completion}`),
+  *   onFinal: async () => data.close()
+  * };
+  * const transformer = createCallbacksTransformer(callbacks);
+  */
+ declare function createCallbacksTransformer(cb: AIStreamCallbacksAndOptions | OpenAIStreamCallbacks | undefined): TransformStream<string | {
+   isText: false;
+   content: string;
+ }, Uint8Array>;
+ /**
+  * Returns a stateful function that, when invoked, trims leading whitespace
+  * from the input text. The trimming only occurs on the first invocation, ensuring that
+  * subsequent calls do not alter the input text. This is particularly useful in scenarios
+  * where a text stream is being processed and only the initial whitespace should be removed.
+  *
+  * @return {function(string): string} A function that takes a string as input and returns a string
+  * with leading whitespace removed if it is the first invocation; otherwise, it returns the input unchanged.
+  *
+  * @example
+  * const trimStart = trimStartOfStreamHelper();
+  * const output1 = trimStart(" text"); // "text"
+  * const output2 = trimStart(" text"); // " text"
+  */
+ declare function trimStartOfStreamHelper(): (text: string) => string;
+ /**
+  * Returns a ReadableStream created from the response, parsed and handled with custom logic.
+  * The stream goes through two transformation stages, first parsing the events and then
+  * invoking the provided callbacks.
+  *
+  * For 2xx HTTP responses:
+  * - The function continues with standard stream processing.
+  *
+  * For non-2xx HTTP responses:
+  * - If the response body is defined, it asynchronously extracts and decodes the response body.
+  * - It then creates a custom ReadableStream to propagate a detailed error message.
+  *
+  * @param {Response} response - The response.
+  * @param {AIStreamParser} customParser - The custom parser function.
+  * @param {AIStreamCallbacksAndOptions} callbacks - The callbacks.
+  * @return {ReadableStream} The AIStream.
+  * @throws Will throw an error if the response is not OK.
+  */
+ declare function AIStream(response: Response, customParser?: AIStreamParser, callbacks?: AIStreamCallbacksAndOptions): ReadableStream<Uint8Array>;
+ /**
+  * Implements ReadableStream.from(asyncIterable), which isn't documented in MDN and isn't implemented in Node.js.
+  * https://github.com/whatwg/streams/commit/8d7a0bf26eb2cc23e884ddbaac7c1da4b91cf2bc
+  */
+ declare function readableFromAsyncIterable<T>(iterable: AsyncIterable<T>): ReadableStream<T>;
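+
+ // Sketch (illustrative): turning an async generator into a ReadableStream.
+ //
+ //   async function* numbers() {
+ //     yield 1;
+ //     yield 2;
+ //   }
+ //   const stream: ReadableStream<number> = readableFromAsyncIterable(numbers());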
+
+ interface CompletionChunk {
+   /**
+    * Unique object identifier.
+    *
+    * The format and length of IDs may change over time.
+    */
+   id: string;
+   /**
+    * The resulting completion up to and excluding the stop sequences.
+    */
+   completion: string;
+   /**
+    * The model that handled the request.
+    */
+   model: string;
+   /**
+    * The reason that we stopped.
+    *
+    * This may be one of the following values:
+    *
+    * - `"stop_sequence"`: we reached a stop sequence — either provided by you via the
+    *   `stop_sequences` parameter, or a stop sequence built into the model
+    * - `"max_tokens"`: we exceeded `max_tokens_to_sample` or the model's maximum
+    */
+   stop_reason: string | null;
+   /**
+    * Object type.
+    *
+    * For Text Completions, this is always `"completion"`.
+    */
+   type: 'completion';
+ }
+ interface Message {
+   id: string;
+   content: Array<ContentBlock>;
+   model: string;
+   role: 'assistant';
+   stop_reason: 'end_turn' | 'max_tokens' | 'stop_sequence' | null;
+   stop_sequence: string | null;
+   type: 'message';
+ }
+ interface ContentBlock {
+   text: string;
+   type: 'text';
+ }
+ interface TextDelta {
+   text: string;
+   type: 'text_delta';
+ }
+ interface ContentBlockDeltaEvent {
+   delta: TextDelta;
+   index: number;
+   type: 'content_block_delta';
+ }
+ interface ContentBlockStartEvent {
+   content_block: ContentBlock;
+   index: number;
+   type: 'content_block_start';
+ }
+ interface ContentBlockStopEvent {
+   index: number;
+   type: 'content_block_stop';
+ }
+ interface MessageDeltaEventDelta {
+   stop_reason: 'end_turn' | 'max_tokens' | 'stop_sequence' | null;
+   stop_sequence: string | null;
+ }
+ interface MessageDeltaEvent {
+   delta: MessageDeltaEventDelta;
+   type: 'message_delta';
+ }
+ type MessageStreamEvent = MessageStartEvent | MessageDeltaEvent | MessageStopEvent | ContentBlockStartEvent | ContentBlockDeltaEvent | ContentBlockStopEvent;
+ interface MessageStartEvent {
+   message: Message;
+   type: 'message_start';
+ }
+ interface MessageStopEvent {
+   type: 'message_stop';
+ }
+ /**
+  * Accepts either a fetch Response from the Anthropic `POST /v1/complete` endpoint,
+  * or the return value of `await client.completions.create({ stream: true })`
+  * from the `@anthropic-ai/sdk` package.
+  */
+ declare function AnthropicStream(res: Response | AsyncIterable<CompletionChunk> | AsyncIterable<MessageStreamEvent>, cb?: AIStreamCallbacksAndOptions): ReadableStream;
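+
+ // Sketch (illustrative; assumes the `@anthropic-ai/sdk` client):
+ //
+ //   import Anthropic from '@anthropic-ai/sdk';
+ //   const anthropic = new Anthropic();
+ //   const response = await anthropic.completions.create({
+ //     model: 'claude-2',
+ //     max_tokens_to_sample: 300,
+ //     prompt: `${Anthropic.HUMAN_PROMPT} Hello${Anthropic.AI_PROMPT}`,
+ //     stream: true,
+ //   });
+ //   return new StreamingTextResponse(AnthropicStream(response));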
+
+ type AssistantResponseSettings = {
+   threadId: string;
+   messageId: string;
+ };
+ type AssistantResponseCallback = (stream: {
+   threadId: string;
+   messageId: string;
+   sendMessage: (message: AssistantMessage) => void;
+   sendDataMessage: (message: DataMessage) => void;
+ }) => Promise<void>;
+ declare function experimental_AssistantResponse({ threadId, messageId }: AssistantResponseSettings, process: AssistantResponseCallback): Response;
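+
+ // Sketch (illustrative; `threadId` and `messageId` would come from the OpenAI
+ // Assistants API, which is not part of these declarations):
+ //
+ //   return experimental_AssistantResponse(
+ //     { threadId, messageId },
+ //     async ({ sendDataMessage }) => {
+ //       // Run the assistant, then forward custom data to the client.
+ //       sendDataMessage({ role: 'data', data: { status: 'done' } });
+ //     },
+ //   );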
+
+ interface AWSBedrockResponse {
+   body?: AsyncIterable<{
+     chunk?: {
+       bytes?: Uint8Array;
+     };
+   }>;
+ }
+ declare function AWSBedrockAnthropicStream(response: AWSBedrockResponse, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
+ declare function AWSBedrockCohereStream(response: AWSBedrockResponse, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
+ declare function AWSBedrockLlama2Stream(response: AWSBedrockResponse, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
+ declare function AWSBedrockStream(response: AWSBedrockResponse, callbacks: AIStreamCallbacksAndOptions | undefined, extractTextDeltaFromChunk: (chunk: any) => string): ReadableStream<any>;
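+
+ // Sketch (illustrative; assumes `@aws-sdk/client-bedrock-runtime` and a
+ // `prompt` string from the request):
+ //
+ //   import { BedrockRuntimeClient, InvokeModelWithResponseStreamCommand } from '@aws-sdk/client-bedrock-runtime';
+ //   const client = new BedrockRuntimeClient({ region: 'us-east-1' });
+ //   const response = await client.send(
+ //     new InvokeModelWithResponseStreamCommand({
+ //       modelId: 'anthropic.claude-v2',
+ //       contentType: 'application/json',
+ //       accept: 'application/json',
+ //       body: JSON.stringify({ prompt, max_tokens_to_sample: 300 }),
+ //     }),
+ //   );
+ //   return new StreamingTextResponse(AWSBedrockAnthropicStream(response));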
+
+ interface StreamChunk {
+   text?: string;
+   eventType: 'stream-start' | 'search-queries-generation' | 'search-results' | 'text-generation' | 'citation-generation' | 'stream-end';
+ }
+ declare function CohereStream(reader: Response | AsyncIterable<StreamChunk>, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
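+
+ // Sketch (illustrative; assumes a client whose chat-stream call yields the
+ // StreamChunk shape above, e.g. `cohere-ai`'s chatStream):
+ //
+ //   import { CohereClient } from 'cohere-ai';
+ //   const cohere = new CohereClient({ token: process.env.COHERE_API_KEY });
+ //   const chatStream = await cohere.chatStream({ message: prompt });
+ //   return new StreamingTextResponse(CohereStream(chatStream));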
+
+ interface GenerateContentResponse {
+   candidates?: GenerateContentCandidate[];
+ }
+ interface GenerateContentCandidate {
+   index: number;
+   content: Content;
+ }
+ interface Content {
+   role: string;
+   parts: Part[];
+ }
+ type Part = TextPart | InlineDataPart;
+ interface InlineDataPart {
+   text?: never;
+ }
+ interface TextPart {
+   text: string;
+   inlineData?: never;
+ }
+ declare function GoogleGenerativeAIStream(response: {
+   stream: AsyncIterable<GenerateContentResponse>;
+ }, cb?: AIStreamCallbacksAndOptions): ReadableStream;
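+
+ // Sketch (illustrative; assumes `@google/generative-ai`):
+ //
+ //   import { GoogleGenerativeAI } from '@google/generative-ai';
+ //   const genAI = new GoogleGenerativeAI(process.env.GOOGLE_API_KEY!);
+ //   const response = await genAI
+ //     .getGenerativeModel({ model: 'gemini-pro' })
+ //     .generateContentStream({ contents: [{ role: 'user', parts: [{ text: prompt }] }] });
+ //   return new StreamingTextResponse(GoogleGenerativeAIStream(response));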
+
+ declare function HuggingFaceStream(res: AsyncGenerator<any>, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
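+
+ // Sketch (illustrative; assumes `@huggingface/inference`):
+ //
+ //   import { HfInference } from '@huggingface/inference';
+ //   const Hf = new HfInference(process.env.HUGGINGFACE_API_KEY);
+ //   const iterator = Hf.textGenerationStream({
+ //     model: 'OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
+ //     inputs: prompt,
+ //   });
+ //   return new StreamingTextResponse(HuggingFaceStream(iterator));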
+
+ type InkeepOnFinalMetadata = {
+   chat_session_id: string;
+   records_cited: any;
+ };
+ type InkeepChatResultCallbacks = {
+   onFinal?: (completion: string, metadata?: InkeepOnFinalMetadata) => Promise<void> | void;
+   onRecordsCited?: (records_cited: InkeepOnFinalMetadata['records_cited']) => void;
+ };
+ type InkeepAIStreamCallbacksAndOptions = AIStreamCallbacksAndOptions & InkeepChatResultCallbacks;
+ declare function InkeepStream(res: Response, callbacks?: InkeepAIStreamCallbacksAndOptions): ReadableStream;
+
+ declare function LangChainStream(callbacks?: AIStreamCallbacksAndOptions): {
+   stream: ReadableStream<any>;
+   writer: WritableStreamDefaultWriter<any>;
+   handlers: {
+     handleLLMNewToken: (token: string) => Promise<void>;
+     handleLLMStart: (_llm: any, _prompts: string[], runId: string) => Promise<void>;
+     handleLLMEnd: (_output: any, runId: string) => Promise<void>;
+     handleLLMError: (e: Error, runId: string) => Promise<void>;
+     handleChainStart: (_chain: any, _inputs: any, runId: string) => Promise<void>;
+     handleChainEnd: (_outputs: any, runId: string) => Promise<void>;
+     handleChainError: (e: Error, runId: string) => Promise<void>;
+     handleToolStart: (_tool: any, _input: string, runId: string) => Promise<void>;
+     handleToolEnd: (_output: string, runId: string) => Promise<void>;
+     handleToolError: (e: Error, runId: string) => Promise<void>;
+   };
+ };
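+
+ // Sketch (illustrative; assumes a LangChain chat model that accepts callback
+ // handlers, e.g. `langchain`'s ChatOpenAI):
+ //
+ //   import { ChatOpenAI } from 'langchain/chat_models/openai';
+ //   const { stream, handlers } = LangChainStream();
+ //   const llm = new ChatOpenAI({ streaming: true });
+ //   llm.call(messages, {}, [handlers]).catch(console.error);
+ //   return new StreamingTextResponse(stream);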
+
+ declare function MistralStream(response: AsyncGenerator<ChatCompletionResponseChunk, void, unknown>, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
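+
+ // Sketch (illustrative; assumes `@mistralai/mistralai`, whose chatStream
+ // returns the AsyncGenerator expected above):
+ //
+ //   import MistralClient from '@mistralai/mistralai';
+ //   const mistral = new MistralClient(process.env.MISTRAL_API_KEY);
+ //   const chunks = mistral.chatStream({ model: 'mistral-small', messages });
+ //   return new StreamingTextResponse(MistralStream(chunks));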
+
+ interface Prediction {
+   id: string;
+   status: 'starting' | 'processing' | 'succeeded' | 'failed' | 'canceled';
+   version: string;
+   input: object;
+   output?: any;
+   source: 'api' | 'web';
+   error?: any;
+   logs?: string;
+   metrics?: {
+     predict_time?: number;
+   };
+   webhook?: string;
+   webhook_events_filter?: ('start' | 'output' | 'logs' | 'completed')[];
+   created_at: string;
+   updated_at?: string;
+   completed_at?: string;
+   urls: {
+     get: string;
+     cancel: string;
+     stream?: string;
+   };
+ }
+ /**
+  * Stream predictions from Replicate.
+  * Only certain models are supported and you must pass `stream: true` to
+  * replicate.predictions.create().
+  * @see https://github.com/replicate/replicate-javascript#streaming
+  *
+  * @example
+  * const response = await replicate.predictions.create({
+  *   stream: true,
+  *   input: {
+  *     prompt: messages.join('\n')
+  *   },
+  *   version: '2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1'
+  * })
+  *
+  * const stream = await ReplicateStream(response)
+  * return new StreamingTextResponse(stream)
+  */
+ declare function ReplicateStream(res: Prediction, cb?: AIStreamCallbacksAndOptions, options?: {
+   headers?: Record<string, string>;
+ }): Promise<ReadableStream>;
+
+ /**
+  * A stream wrapper to send custom JSON-encoded data back to the client.
+  */
+ declare class experimental_StreamData {
+   private encoder;
+   private controller;
+   stream: TransformStream<Uint8Array, Uint8Array>;
+   private isClosedPromise;
+   private isClosedPromiseResolver;
+   private isClosed;
+   private data;
+   private messageAnnotations;
+   constructor();
+   close(): Promise<void>;
+   append(value: JSONValue): void;
+   appendMessageAnnotation(value: JSONValue): void;
+ }
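+
+ // Sketch (illustrative): appending custom data alongside a completion stream.
+ //
+ //   const data = new experimental_StreamData();
+ //   data.append({ source: 'docs' });
+ //   const stream = OpenAIStream(response, {
+ //     experimental_streamData: true,
+ //     onFinal() {
+ //       data.close();
+ //     },
+ //   });
+ //   return new StreamingTextResponse(stream, {}, data);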
+ /**
+  * A TransformStream for LLMs that do not have their own transform stream handlers managing encoding (e.g. OpenAIStream has one for function call handling).
+  * This assumes every chunk is a 'text' chunk.
+  */
+ declare function createStreamDataTransformer(experimental_streamData: boolean | undefined): TransformStream<any, any>;
+
+ /**
+  * This is a naive implementation of the streaming React response API.
+  * Currently, it can carry the original raw content, data payload and a special
+  * UI payload and stream them via "rows" (nested promises).
+  * It must be used inside Server Actions so Flight can encode the React elements.
+  *
+  * It is naive because, unlike StreamingTextResponse, it does not send the diff
+  * between the rows; it flushes the full payload on each row.
+  */
+
+ type UINode = string | JSX.Element | JSX.Element[] | null | undefined;
+ type Payload = {
+   ui: UINode | Promise<UINode>;
+   content: string;
+ };
+ type ReactResponseRow = Payload & {
+   next: null | Promise<ReactResponseRow>;
+ };
+ /**
+  * A utility class for streaming React responses.
+  */
+ declare class experimental_StreamingReactResponse {
+   constructor(res: ReadableStream, options?: {
+     ui?: (message: {
+       content: string;
+       data?: JSONValue[] | undefined;
+     }) => UINode | Promise<UINode>;
+     data?: experimental_StreamData;
+     generateId?: IdGenerator;
+   });
+ }
+
+ /**
+  * A utility class for streaming text responses.
+  */
+ declare class StreamingTextResponse extends Response {
+   constructor(res: ReadableStream, init?: ResponseInit, data?: experimental_StreamData);
+ }
+ /**
+  * A utility function to stream a ReadableStream to a Node.js response-like object.
+  */
+ declare function streamToResponse(res: ReadableStream, response: ServerResponse, init?: {
+   headers?: Record<string, string>;
+   status?: number;
+ }): void;
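+
+ // Sketch (illustrative): piping a stream into a Node.js HTTP server response.
+ // `openaiResponse` below is a hypothetical helper, not part of this package.
+ //
+ //   import { createServer } from 'node:http';
+ //   createServer(async (req, res) => {
+ //     const stream = OpenAIStream(await openaiResponse(req)); // hypothetical helper
+ //     streamToResponse(stream, res, { status: 200 });
+ //   }).listen(3000);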
+
+ export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantMessage, COMPLEX_HEADER, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CreateMessage, DataMessage, Function, FunctionCall, FunctionCallHandler, FunctionCallPayload, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, LangChainStream, Message$1 as Message, MistralStream, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamString, StreamingTextResponse, Tool, ToolCall, ToolCallHandler, ToolCallPayload, ToolChoice, UseChatOptions, UseCompletionOptions, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, isStreamStringEqualToType, nanoid, readableFromAsyncIterable, streamToResponse, trimStartOfStreamHelper };