ai 3.4.32 → 4.0.0-canary.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +21 -0
- package/dist/index.d.mts +6 -474
- package/dist/index.d.ts +6 -474
- package/dist/index.js +120 -783
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +107 -758
- package/dist/index.mjs.map +1 -1
- package/package.json +11 -49
- package/rsc/dist/index.d.ts +5 -67
- package/rsc/dist/rsc-server.d.mts +5 -67
- package/rsc/dist/rsc-server.mjs +19 -623
- package/rsc/dist/rsc-server.mjs.map +1 -1
- package/solid/dist/index.d.mts +0 -13
- package/solid/dist/index.d.ts +0 -13
- package/solid/dist/index.js +0 -35
- package/solid/dist/index.js.map +0 -1
- package/solid/dist/index.mjs +0 -12
- package/solid/dist/index.mjs.map +0 -1
- package/svelte/dist/index.d.mts +0 -175
- package/svelte/dist/index.d.ts +0 -175
- package/svelte/dist/index.js +0 -1161
- package/svelte/dist/index.js.map +0 -1
- package/svelte/dist/index.mjs +0 -1136
- package/svelte/dist/index.mjs.map +0 -1
- package/vue/dist/index.d.mts +0 -17
- package/vue/dist/index.d.ts +0 -17
- package/vue/dist/index.js +0 -38
- package/vue/dist/index.js.map +0 -1
- package/vue/dist/index.mjs +0 -15
- package/vue/dist/index.mjs.map +0 -1
package/dist/index.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import { ToolInvocation, Attachment, Schema, DeepPartial,
+import { ToolInvocation, Attachment, Schema, DeepPartial, AssistantMessage, DataMessage, JSONValue as JSONValue$1 } from '@ai-sdk/ui-utils';
 export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DeepPartial, Function, FunctionCall, FunctionCallHandler, IdGenerator, JSONValue, Message, RequestOptions, Schema, StreamPart, Tool, ToolCall, ToolCallHandler, ToolChoice, ToolInvocation, UseAssistantOptions, formatStreamPart, jsonSchema, parseStreamPart, processDataProtocolResponse, readDataStream } from '@ai-sdk/ui-utils';
 import { AttributeValue, Tracer } from '@opentelemetry/api';
 import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, JSONValue, LanguageModelV1CallOptions, NoSuchModelError, AISDKError } from '@ai-sdk/provider';
@@ -428,7 +428,7 @@ type ToolResultContent = Array<{
 /**
 Text content part of a prompt. It contains a string of text.
  */
-interface TextPart
+interface TextPart {
     type: 'text';
     /**
 The text content.
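Note: for orientation, a minimal value satisfying the `TextPart` prompt-content interface declared above (the variable name is illustrative; `TextPart` is exported from `ai`, per the export list at the end of this file):

```ts
import type { TextPart } from 'ai';

// Minimal value matching the TextPart prompt-content interface above.
const part: TextPart = { type: 'text', text: 'Hello, world!' };
```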
@@ -585,7 +585,7 @@ type ExperimentalUserMessage = CoreUserMessage;
 /**
 Content of a user message. It can be a string or an array of text and image parts.
  */
-type UserContent = string | Array<TextPart
+type UserContent = string | Array<TextPart | ImagePart | FilePart>;
 /**
 An assistant message. It can contain text, tool calls, or a combination of text and tool calls.
  */
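Note: `UserContent` now names `FilePart` alongside `TextPart` and `ImagePart`. A sketch of a multi-part user message; the `FilePart` fields (`data`, `mimeType`) and `ImagePart` field (`image`) follow the declarations elsewhere in this file, and the URLs are illustrative:

```ts
import type { CoreUserMessage } from 'ai';

// Sketch: a user message mixing the three part types UserContent allows.
const message: CoreUserMessage = {
  role: 'user',
  content: [
    { type: 'text', text: 'Summarize this document and describe the image.' },
    { type: 'image', image: new URL('https://example.com/chart.png') },
    { type: 'file', data: new URL('https://example.com/report.pdf'), mimeType: 'application/pdf' },
  ],
};
```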
@@ -606,7 +606,7 @@ type ExperimentalAssistantMessage = CoreAssistantMessage;
 /**
 Content of an assistant message. It can be a string or an array of text and tool call parts.
  */
-type AssistantContent = string | Array<TextPart
+type AssistantContent = string | Array<TextPart | ToolCallPart>;
 /**
 A tool message. It contains the result of one or more tool calls.
  */
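Note: a companion sketch for `AssistantContent`, mixing text and a tool call; the `toolCallId`/`toolName`/`args` fields follow the `ToolCallPart` declaration in this file, and the values are illustrative:

```ts
import type { CoreAssistantMessage } from 'ai';

// Sketch: assistant content as an array of text and tool-call parts.
const reply: CoreAssistantMessage = {
  role: 'assistant',
  content: [
    { type: 'text', text: 'Let me check the weather.' },
    { type: 'tool-call', toolCallId: 'call-1', toolName: 'weather', args: { city: 'Berlin' } },
  ],
};
```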
@@ -2508,231 +2508,6 @@ declare class RetryError extends AISDKError {
 };
 }
 
-declare interface AzureChatCompletions {
-    id: string;
-    created: Date;
-    choices: AzureChatChoice[];
-    systemFingerprint?: string;
-    usage?: AzureCompletionsUsage;
-    promptFilterResults: any[];
-}
-declare interface AzureChatChoice {
-    message?: AzureChatResponseMessage;
-    index: number;
-    finishReason: string | null;
-    delta?: AzureChatResponseMessage;
-}
-declare interface AzureChatResponseMessage {
-    role: string;
-    content: string | null;
-    toolCalls: AzureChatCompletionsFunctionToolCall[];
-    functionCall?: AzureFunctionCall;
-}
-declare interface AzureCompletionsUsage {
-    completionTokens: number;
-    promptTokens: number;
-    totalTokens: number;
-}
-declare interface AzureFunctionCall {
-    name: string;
-    arguments: string;
-}
-declare interface AzureChatCompletionsFunctionToolCall {
-    type: 'function';
-    function: AzureFunctionCall;
-    id: string;
-}
-
-type OpenAIStreamCallbacks = AIStreamCallbacksAndOptions & {
-    /**
-     * @example
-     * ```js
-     * const response = await openai.chat.completions.create({
-     *   model: 'gpt-3.5-turbo-0613',
-     *   stream: true,
-     *   messages,
-     *   functions,
-     * })
-     *
-     * const stream = OpenAIStream(response, {
-     *   experimental_onFunctionCall: async (functionCallPayload, createFunctionCallMessages) => {
-     *     // ... run your custom logic here
-     *     const result = await myFunction(functionCallPayload)
-     *
-     *     // Ask for another completion, or return a string to send to the client as an assistant message.
-     *     return await openai.chat.completions.create({
-     *       model: 'gpt-3.5-turbo-0613',
-     *       stream: true,
-     *       // Append the relevant "assistant" and "function" call messages
-     *       messages: [...messages, ...createFunctionCallMessages(result)],
-     *       functions,
-     *     })
-     *   }
-     * })
-     * ```
-     */
-    experimental_onFunctionCall?: (functionCallPayload: FunctionCallPayload, createFunctionCallMessages: (functionCallResult: JSONValue$1) => CreateMessage[]) => Promise<Response | undefined | void | string | AsyncIterableOpenAIStreamReturnTypes>;
-    /**
-     * @example
-     * ```js
-     * const response = await openai.chat.completions.create({
-     *   model: 'gpt-3.5-turbo-1106', // or gpt-4-1106-preview
-     *   stream: true,
-     *   messages,
-     *   tools,
-     *   tool_choice: "auto", // auto is default, but we'll be explicit
-     * })
-     *
-     * const stream = OpenAIStream(response, {
-     *   experimental_onToolCall: async (toolCallPayload, appendToolCallMessages) => {
-     *     let messages: CreateMessage[] = []
-     *     // There might be multiple tool calls, so we need to iterate through them
-     *     for (const tool of toolCallPayload.tools) {
-     *       // ... run your custom logic here
-     *       const result = await myFunction(tool.function)
-     *       // Append the relevant "assistant" and "tool" call messages
-     *       appendToolCallMessage({tool_call_id:tool.id, function_name:tool.function.name, tool_call_result:result})
-     *     }
-     *     // Ask for another completion, or return a string to send to the client as an assistant message.
-     *     return await openai.chat.completions.create({
-     *       model: 'gpt-3.5-turbo-1106', // or gpt-4-1106-preview
-     *       stream: true,
-     *       // Append the results messages, calling appendToolCallMessage without
-     *       // any arguments will just return the accumulated messages
-     *       messages: [...messages, ...appendToolCallMessage()],
-     *       tools,
-     *       tool_choice: "auto", // auto is default, but we'll be explicit
-     *     })
-     *   }
-     * })
-     * ```
-     */
-    experimental_onToolCall?: (toolCallPayload: ToolCallPayload, appendToolCallMessage: (result?: {
-        tool_call_id: string;
-        function_name: string;
-        tool_call_result: JSONValue$1;
-    }) => CreateMessage[]) => Promise<Response | undefined | void | string | AsyncIterableOpenAIStreamReturnTypes>;
-};
-interface ChatCompletionChunk {
-    id: string;
-    choices: Array<ChatCompletionChunkChoice>;
-    created: number;
-    model: string;
-    object: string;
-}
-interface ChatCompletionChunkChoice {
-    delta: ChoiceDelta;
-    finish_reason: 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call' | null;
-    index: number;
-}
-interface ChoiceDelta {
-    /**
-     * The contents of the chunk message.
-     */
-    content?: string | null;
-    /**
-     * The name and arguments of a function that should be called, as generated by the
-     * model.
-     */
-    function_call?: FunctionCall$1;
-    /**
-     * The role of the author of this message.
-     */
-    role?: 'system' | 'user' | 'assistant' | 'tool';
-    tool_calls?: Array<DeltaToolCall>;
-}
-interface DeltaToolCall {
-    index: number;
-    /**
-     * The ID of the tool call.
-     */
-    id?: string;
-    /**
-     * The function that the model called.
-     */
-    function?: ToolCallFunction;
-    /**
-     * The type of the tool. Currently, only `function` is supported.
-     */
-    type?: 'function';
-}
-interface ToolCallFunction {
-    /**
-     * The arguments to call the function with, as generated by the model in JSON
-     * format. Note that the model does not always generate valid JSON, and may
-     * hallucinate parameters not defined by your function schema. Validate the
-     * arguments in your code before calling your function.
-     */
-    arguments?: string;
-    /**
-     * The name of the function to call.
-     */
-    name?: string;
-}
-/**
- * https://github.com/openai/openai-node/blob/3ec43ee790a2eb6a0ccdd5f25faa23251b0f9b8e/src/resources/completions.ts#L28C1-L64C1
- * Completions API. Streamed and non-streamed responses are the same.
- */
-interface Completion {
-    /**
-     * A unique identifier for the completion.
-     */
-    id: string;
-    /**
-     * The list of completion choices the model generated for the input prompt.
-     */
-    choices: Array<CompletionChoice>;
-    /**
-     * The Unix timestamp of when the completion was created.
-     */
-    created: number;
-    /**
-     * The model used for completion.
-     */
-    model: string;
-    /**
-     * The object type, which is always "text_completion"
-     */
-    object: string;
-    /**
-     * Usage statistics for the completion request.
-     */
-    usage?: CompletionUsage;
-}
-interface CompletionChoice {
-    /**
-     * The reason the model stopped generating tokens. This will be `stop` if the model
-     * hit a natural stop point or a provided stop sequence, or `length` if the maximum
-     * number of tokens specified in the request was reached.
-     */
-    finish_reason: 'stop' | 'length' | 'content_filter';
-    index: number;
-    logprobs: any | null;
-    text: string;
-}
-interface CompletionUsage {
-    /**
-     * Usage statistics for the completion request.
-     */
-    /**
-     * Number of tokens in the generated completion.
-     */
-    completion_tokens: number;
-    /**
-     * Number of tokens in the prompt.
-     */
-    prompt_tokens: number;
-    /**
-     * Total number of tokens used in the request (prompt + completion).
-     */
-    total_tokens: number;
-}
-type AsyncIterableOpenAIStreamReturnTypes = AsyncIterable<ChatCompletionChunk> | AsyncIterable<Completion> | AsyncIterable<AzureChatCompletions>;
-/**
- * @deprecated Use the [OpenAI provider](https://sdk.vercel.ai/providers/ai-sdk-providers/openai) instead.
- */
-declare function OpenAIStream(res: Response | AsyncIterableOpenAIStreamReturnTypes, callbacks?: OpenAIStreamCallbacks): ReadableStream;
-
 interface FunctionCallPayload {
     name: string;
     arguments: Record<string, unknown>;
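Note: this hunk deletes `OpenAIStream`, its callback types, and the vendored OpenAI/Azure response shapes. A minimal sketch of the replacement path via the OpenAI provider, assuming `@ai-sdk/openai` and `zod` are installed; the model id and the `weather` tool are illustrative, not part of this diff:

```ts
import { openai } from '@ai-sdk/openai';
import { streamText, tool } from 'ai';
import { z } from 'zod';

export async function POST(req: Request) {
  const { messages } = await req.json();

  // streamText replaces the OpenAIStream + experimental_onToolCall pattern:
  // tool execution happens in `execute`, and the result streams to the client.
  const result = await streamText({
    model: openai('gpt-4o-mini'), // illustrative model id
    messages,
    tools: {
      weather: tool({
        description: 'Get the weather for a city',
        parameters: z.object({ city: z.string() }),
        execute: async ({ city }) => ({ city, tempC: 21 }),
      }),
    },
  });

  return result.toDataStreamResponse();
}
```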
@@ -2820,7 +2595,7 @@ declare function createEventStreamTransformer(customParser?: AIStreamParser): Tr
  * };
  * const transformer = createCallbacksTransformer(callbacks);
  */
-declare function createCallbacksTransformer(cb: AIStreamCallbacksAndOptions |
+declare function createCallbacksTransformer(cb: AIStreamCallbacksAndOptions | undefined): TransformStream<string | {
     isText: false;
     content: string;
 }, Uint8Array>;
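Note: a small sketch of piping text through the transformer declared here; the source stream contents and callback bodies are illustrative:

```ts
import { createCallbacksTransformer } from 'ai';

// Illustrative source stream of text chunks.
const source = new ReadableStream<string>({
  start(controller) {
    controller.enqueue('Hello, ');
    controller.enqueue('world!');
    controller.close();
  },
});

// The transformer invokes the callbacks as chunks flow through and
// emits the encoded bytes downstream (ReadableStream<Uint8Array>).
const bytes = source.pipeThrough(
  createCallbacksTransformer({
    onToken: (token) => console.log('token:', token),
    onFinal: (completion) => console.log('final:', completion),
  }),
);
```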
@@ -2865,94 +2640,6 @@ declare function AIStream(response: Response, customParser?: AIStreamParser, cal
  */
 declare function readableFromAsyncIterable<T>(iterable: AsyncIterable<T>): ReadableStream<T>;
 
-interface CompletionChunk {
-    /**
-     * Unique object identifier.
-     *
-     * The format and length of IDs may change over time.
-     */
-    id: string;
-    /**
-     * The resulting completion up to and excluding the stop sequences.
-     */
-    completion: string;
-    /**
-     * The model that handled the request.
-     */
-    model: string;
-    /**
-     * The reason that we stopped.
-     *
-     * This may be one of the following values:
-     *
-     * - `"stop_sequence"`: we reached a stop sequence — either provided by you via the
-     *   `stop_sequences` parameter, or a stop sequence built into the model
-     * - `"max_tokens"`: we exceeded `max_tokens_to_sample` or the model's maximum
-     */
-    stop_reason: string | null;
-    /**
-     * Object type.
-     *
-     * For Text Completions, this is always `"completion"`.
-     */
-    type: 'completion';
-}
-interface Message {
-    id: string;
-    content: Array<ContentBlock>;
-    model: string;
-    role: 'assistant';
-    stop_reason: 'end_turn' | 'max_tokens' | 'stop_sequence' | null;
-    stop_sequence: string | null;
-    type: 'message';
-}
-interface ContentBlock {
-    text: string;
-    type: 'text';
-}
-interface TextDelta {
-    text: string;
-    type: 'text_delta';
-}
-interface ContentBlockDeltaEvent {
-    delta: TextDelta;
-    index: number;
-    type: 'content_block_delta';
-}
-interface ContentBlockStartEvent {
-    content_block: ContentBlock;
-    index: number;
-    type: 'content_block_start';
-}
-interface ContentBlockStopEvent {
-    index: number;
-    type: 'content_block_stop';
-}
-interface MessageDeltaEventDelta {
-    stop_reason: 'end_turn' | 'max_tokens' | 'stop_sequence' | null;
-    stop_sequence: string | null;
-}
-interface MessageDeltaEvent {
-    delta: MessageDeltaEventDelta;
-    type: 'message_delta';
-}
-type MessageStreamEvent = MessageStartEvent | MessageDeltaEvent | MessageStopEvent | ContentBlockStartEvent | ContentBlockDeltaEvent | ContentBlockStopEvent;
-interface MessageStartEvent {
-    message: Message;
-    type: 'message_start';
-}
-interface MessageStopEvent {
-    type: 'message_stop';
-}
-/**
- * Accepts either a fetch Response from the Anthropic `POST /v1/complete` endpoint,
- * or the return value of `await client.completions.create({ stream: true })`
- * from the `@anthropic-ai/sdk` package.
- *
- * @deprecated Use the [Anthropic provider](https://sdk.vercel.ai/providers/ai-sdk-providers/anthropic) instead.
- */
-declare function AnthropicStream(res: Response | AsyncIterable<CompletionChunk> | AsyncIterable<MessageStreamEvent>, cb?: AIStreamCallbacksAndOptions): ReadableStream;
-
 /**
 You can pass the thread and the latest message into the `AssistantResponse`. This establishes the context for the response.
  */
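Note: `AnthropicStream` and the vendored Anthropic event types are removed. A minimal sketch of the provider-based replacement the deprecation notice points to, assuming `@ai-sdk/anthropic` is installed (model id is illustrative):

```ts
import { anthropic } from '@ai-sdk/anthropic';
import { streamText } from 'ai';

export async function POST(req: Request) {
  const { prompt } = await req.json();

  const result = await streamText({
    model: anthropic('claude-3-5-sonnet-20240620'), // illustrative model id
    prompt,
  });

  return result.toDataStreamResponse();
}
```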
@@ -3002,64 +2689,6 @@ declare function AssistantResponse({ threadId, messageId }: AssistantResponseSet
  */
 declare const experimental_AssistantResponse: typeof AssistantResponse;
 
-interface AWSBedrockResponse {
-    body?: AsyncIterable<{
-        chunk?: {
-            bytes?: Uint8Array;
-        };
-    }>;
-}
-declare function AWSBedrockAnthropicMessagesStream(response: AWSBedrockResponse, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
-declare function AWSBedrockAnthropicStream(response: AWSBedrockResponse, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
-declare function AWSBedrockCohereStream(response: AWSBedrockResponse, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
-declare function AWSBedrockLlama2Stream(response: AWSBedrockResponse, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
-declare function AWSBedrockStream(response: AWSBedrockResponse, callbacks: AIStreamCallbacksAndOptions | undefined, extractTextDeltaFromChunk: (chunk: any) => string): ReadableStream<any>;
-
-interface StreamChunk {
-    text?: string;
-    eventType: 'stream-start' | 'search-queries-generation' | 'search-results' | 'text-generation' | 'citation-generation' | 'stream-end';
-}
-declare function CohereStream(reader: Response | AsyncIterable<StreamChunk>, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
-
-interface GenerateContentResponse {
-    candidates?: GenerateContentCandidate[];
-}
-interface GenerateContentCandidate {
-    index: number;
-    content: Content;
-}
-interface Content {
-    role: string;
-    parts: Part[];
-}
-type Part = TextPart | InlineDataPart;
-interface InlineDataPart {
-    text?: never;
-}
-interface TextPart {
-    text: string;
-    inlineData?: never;
-}
-/**
- * @deprecated Use the [Google Generative AI provider](https://sdk.vercel.ai/providers/ai-sdk-providers/google-generative-ai) instead.
- */
-declare function GoogleGenerativeAIStream(response: {
-    stream: AsyncIterable<GenerateContentResponse>;
-}, cb?: AIStreamCallbacksAndOptions): ReadableStream;
-
-declare function HuggingFaceStream(res: AsyncGenerator<any>, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
-
-type InkeepOnFinalMetadata = {
-    chat_session_id: string;
-    records_cited: any;
-};
-type InkeepChatResultCallbacks = {
-    onFinal?: (completion: string, metadata?: InkeepOnFinalMetadata) => Promise<void> | void;
-    onRecordsCited?: (records_cited: InkeepOnFinalMetadata['records_cited']) => void;
-};
-type InkeepAIStreamCallbacksAndOptions = AIStreamCallbacksAndOptions & InkeepChatResultCallbacks;
-declare function InkeepStream(res: Response, callbacks?: InkeepAIStreamCallbacksAndOptions): ReadableStream;
-
 /**
  * A stream wrapper to send custom JSON-encoded data back to the client.
  */
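Note: the Bedrock, Cohere, Google, Hugging Face, and Inkeep stream helpers all disappear in favor of provider packages. One representative sketch using the Google provider, assuming `@ai-sdk/google` is installed (model id illustrative); providers such as `@ai-sdk/amazon-bedrock` and `@ai-sdk/cohere` follow the same shape:

```ts
import { google } from '@ai-sdk/google';
import { streamText } from 'ai';

export async function POST(req: Request) {
  const { prompt } = await req.json();

  const result = await streamText({
    model: google('gemini-1.5-flash'), // illustrative model id
    prompt,
  });

  return result.toDataStreamResponse();
}
```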
@@ -3162,99 +2791,6 @@ declare namespace llamaindexAdapter {
 };
 }
 
-/**
- * @deprecated Use [LangChainAdapter](https://sdk.vercel.ai/providers/adapters/langchain) instead.
- */
-declare function LangChainStream(callbacks?: AIStreamCallbacksAndOptions): {
-    stream: ReadableStream<any>;
-    writer: WritableStreamDefaultWriter<any>;
-    handlers: {
-        handleLLMNewToken: (token: string) => Promise<void>;
-        handleLLMStart: (_llm: any, _prompts: string[], runId: string) => Promise<void>;
-        handleLLMEnd: (_output: any, runId: string) => Promise<void>;
-        handleLLMError: (e: Error, runId: string) => Promise<void>;
-        handleChainStart: (_chain: any, _inputs: any, runId: string) => Promise<void>;
-        handleChainEnd: (_outputs: any, runId: string) => Promise<void>;
-        handleChainError: (e: Error, runId: string) => Promise<void>;
-        handleToolStart: (_tool: any, _input: string, runId: string) => Promise<void>;
-        handleToolEnd: (_output: string, runId: string) => Promise<void>;
-        handleToolError: (e: Error, runId: string) => Promise<void>;
-    };
-};
-
-interface ChatCompletionResponseChunk {
-    id: string;
-    object: 'chat.completion.chunk';
-    created: number;
-    model: string;
-    choices: ChatCompletionResponseChunkChoice[];
-}
-interface ChatCompletionResponseChunkChoice {
-    index: number;
-    delta: {
-        role?: string;
-        content?: string;
-        tool_calls?: ToolCalls[];
-    };
-    finish_reason: string;
-}
-interface FunctionCall {
-    name: string;
-    arguments: string;
-}
-interface ToolCalls {
-    id: 'null';
-    type: 'function';
-    function: FunctionCall;
-}
-declare function MistralStream(response: AsyncGenerator<ChatCompletionResponseChunk, void, unknown>, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
-
-interface Prediction {
-    id: string;
-    status: 'starting' | 'processing' | 'succeeded' | 'failed' | 'canceled';
-    version: string;
-    input: object;
-    output?: any;
-    source: 'api' | 'web';
-    error?: any;
-    logs?: string;
-    metrics?: {
-        predict_time?: number;
-    };
-    webhook?: string;
-    webhook_events_filter?: ('start' | 'output' | 'logs' | 'completed')[];
-    created_at: string;
-    updated_at?: string;
-    completed_at?: string;
-    urls: {
-        get: string;
-        cancel: string;
-        stream?: string;
-    };
-}
-/**
- * Stream predictions from Replicate.
- * Only certain models are supported and you must pass `stream: true` to
- * replicate.predictions.create().
- * @see https://github.com/replicate/replicate-javascript#streaming
- *
- * @example
- * const response = await replicate.predictions.create({
- *   stream: true,
- *   input: {
- *     prompt: messages.join('\n')
- *   },
- *   version: '2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1'
- * })
- *
- * const stream = await ReplicateStream(response)
- * return new StreamingTextResponse(stream)
- *
- */
-declare function ReplicateStream(res: Prediction, cb?: AIStreamCallbacksAndOptions, options?: {
-    headers?: Record<string, string>;
-}): Promise<ReadableStream>;
-
 /**
  * A utility function to stream a ReadableStream to a Node.js response-like object.
  *
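Note: `LangChainStream`, `MistralStream`, and `ReplicateStream` are gone; the deprecation notice above points LangChain users at `LangChainAdapter`, which survives (see the export list below). A sketch, assuming `@langchain/openai` is installed (model id illustrative):

```ts
import { LangChainAdapter } from 'ai';
import { ChatOpenAI } from '@langchain/openai';

export async function POST(req: Request) {
  const { prompt } = await req.json();

  const model = new ChatOpenAI({ model: 'gpt-4o-mini' }); // illustrative model id
  const stream = await model.stream(prompt);

  // LangChainAdapter converts the LangChain stream into a data stream response.
  return LangChainAdapter.toDataStreamResponse(stream);
}
```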
@@ -3283,9 +2819,5 @@ declare class StreamingTextResponse extends Response {
 }
 
 declare const generateId: (size?: number) => string;
-/**
-@deprecated Use `generateId` instead.
- */
-declare const nanoid: (size?: number) => string;
 
-export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions,
+export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AssistantContent, AssistantResponse, CallWarning, CompletionTokenUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, EmbeddingTokenUsage, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelResponseMetadataWithHeaders, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, StreamingTextResponse, TextPart, TextStreamPart, TokenUsage, ToolCallPart, ToolCallPayload, ToolContent, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createCallbacksTransformer, createEventStreamTransformer, createStreamDataTransformer, embed, embedMany, experimental_AssistantResponse, experimental_ModelRegistry, experimental_Provider, experimental_ProviderRegistry, experimental_StreamData, experimental_createModelRegistry, experimental_createProviderRegistry, experimental_customProvider, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, experimental_wrapLanguageModel, generateId, generateObject, generateText, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
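Note: the deprecated `nanoid` re-export is dropped; `generateId` is the surviving helper, with the `(size?: number)` signature shown above. Usage:

```ts
import { generateId } from 'ai';

const id = generateId();       // unique id of the default length
const shortId = generateId(8); // explicit size, per the (size?: number) signature
```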