ai 3.1.2 → 3.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -510,11 +510,12 @@ interface CoreTool<PARAMETERS extends z.ZodTypeAny = any, RESULT = any> {
  description?: string;
  /**
  The schema of the input that the tool expects. The language model will use this to generate the input.
+ It is also used to validate the output of the language model.
  Use descriptions to make the input understandable for the language model.
  */
  parameters: PARAMETERS;
  /**
- An optional execute function for the actual execution function of the tool.
+ An async function that is called with the arguments from the tool call and produces a result.
  If not provided, the tool will not be executed automatically.
  */
  execute?: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
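For orientation, a minimal sketch of a tool that satisfies this interface, assuming the `tool` helper exported by this package and zod for the schema (the `getWeather` name and schema are illustrative, not part of the diff):

import { tool } from 'ai';
import { z } from 'zod';

// Illustrative tool: the zod schema in `parameters` is what the model uses to
// generate arguments and, per the updated comment, also validates them.
const getWeather = tool({
  description: 'Get the current weather for a city.',
  parameters: z.object({
    city: z.string().describe('The city to look up'),
  }),
  // Async execute: called with the parsed arguments from the tool call.
  execute: async ({ city }) => ({ city, temperatureC: 21 }),
});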
@@ -615,7 +616,7 @@ Generate a text and call tools for a given prompt using a language model.
  This function does not stream the output. If you want to stream the output, use `streamText` instead.

  @param model - The language model to use.
- @param tools - The tools that the model can call. The model needs to support calling tools.
+ @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.

  @param system - A system message that will be part of the prompt.
  @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
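A hedged usage sketch of the reworded `tools` parameter (the provider import and model id are assumptions, not part of this diff; `getWeather` refers to the tool sketched above):

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

async function main() {
  const { text, toolResults } = await generateText({
    model: openai('gpt-4-turbo'), // assumed model id
    tools: { getWeather },        // tools accessible to and callable by the model
    prompt: 'What is the weather in Berlin?',
  });
  console.log(text, toolResults);
}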
@@ -720,7 +721,7 @@ Generate a text and call tools for a given prompt using a language model.
  This function streams the output. If you do not want to stream the output, use `generateText` instead.

  @param model - The language model to use.
- @param tools - The tools that the model can call. The model needs to support calling tools.
+ @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.

  @param system - A system message that will be part of the prompt.
  @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
@@ -784,10 +785,18 @@ A result object for accessing different stream types and additional information.
  declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
  private originalStream;
  /**
- Warnings from the model provider (e.g. unsupported settings)
+ Warnings from the model provider (e.g. unsupported settings).
  */
  readonly warnings: CallWarning[] | undefined;
  /**
+ The token usage of the generated text. Resolved when the response is finished.
+ */
+ readonly usage: Promise<TokenUsage>;
+ /**
+ The reason why the generation finished. Resolved when the response is finished.
+ */
+ readonly finishReason: Promise<FinishReason>;
+ /**
  Optional raw response data.
  */
  rawResponse?: {
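A hedged sketch of consuming the two new promise-valued fields added in this range (the provider setup is an assumption; only `streamText` and the `usage`/`finishReason` promises come from this diff):

import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

async function run() {
  const result = await streamText({
    model: openai('gpt-4-turbo'), // assumed model id
    prompt: 'Write a haiku about package diffs.',
  });

  // Stream the text as it arrives.
  for await (const delta of result.textStream) {
    process.stdout.write(delta);
  }

  // Both promises resolve once the response is finished.
  console.log(await result.usage);        // TokenUsage
  console.log(await result.finishReason); // FinishReason, e.g. 'stop'
}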
@@ -1819,6 +1828,42 @@ type InkeepChatResultCallbacks = {
  type InkeepAIStreamCallbacksAndOptions = AIStreamCallbacksAndOptions & InkeepChatResultCallbacks;
  declare function InkeepStream(res: Response, callbacks?: InkeepAIStreamCallbacksAndOptions): ReadableStream;

+ type LangChainImageDetail = 'auto' | 'low' | 'high';
+ type LangChainMessageContentText = {
+ type: 'text';
+ text: string;
+ };
+ type LangChainMessageContentImageUrl = {
+ type: 'image_url';
+ image_url: string | {
+ url: string;
+ detail?: LangChainImageDetail;
+ };
+ };
+ type LangChainMessageContentComplex = LangChainMessageContentText | LangChainMessageContentImageUrl | (Record<string, any> & {
+ type?: 'text' | 'image_url' | string;
+ }) | (Record<string, any> & {
+ type?: never;
+ });
+ type LangChainMessageContent = string | LangChainMessageContentComplex[];
+ type LangChainAIMessageChunk = {
+ content: LangChainMessageContent;
+ };
+ /**
+ Converts the result of a LangChain Expression Language stream invocation to an AIStream.
+ */
+ declare function toAIStream(stream: ReadableStream<LangChainAIMessageChunk>, callbacks?: AIStreamCallbacksAndOptions): ReadableStream<any>;
+
+ declare const langchainAdapter_toAIStream: typeof toAIStream;
+ declare namespace langchainAdapter {
+ export {
+ langchainAdapter_toAIStream as toAIStream,
+ };
+ }
+
+ /**
+ @deprecated Use LangChainAdapter.toAIStream() instead.
+ */
  declare function LangChainStream(callbacks?: AIStreamCallbacksAndOptions): {
  stream: ReadableStream<any>;
  writer: WritableStreamDefaultWriter<any>;
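A hedged sketch of the new adapter in a streaming route handler (the LangChain setup via `@langchain/openai` is an assumption; `LangChainAdapter.toAIStream` and `StreamingTextResponse` are the parts exported by this package):

import { LangChainAdapter, StreamingTextResponse } from 'ai';
import { ChatOpenAI } from '@langchain/openai'; // assumed LangChain package

export async function POST(req: Request) {
  const { prompt } = await req.json();

  // model.stream() emits chunks with a `content` field, matching the
  // LangChainAIMessageChunk shape declared above.
  const model = new ChatOpenAI();
  const stream = await model.stream(prompt);

  return new StreamingTextResponse(LangChainAdapter.toAIStream(stream));
}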
@@ -1983,4 +2028,4 @@ declare function streamToResponse(res: ReadableStream, response: ServerResponse,
  status?: number;
  }): void;
 
- export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, CallWarning, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreTool, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataMessage, DeepPartial, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, LangChainStream, LanguageModel, LogProbs, Message$1 as Message, MistralStream, ObjectStreamPart, ObjectStreamPartInput, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolResultPart, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, generateObject, generateText, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
+ export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, CallWarning, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreTool, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataMessage, DeepPartial, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, Message$1 as Message, MistralStream, ObjectStreamPart, ObjectStreamPartInput, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolResultPart, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, generateObject, generateText, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
package/dist/index.d.ts CHANGED
@@ -510,11 +510,12 @@ interface CoreTool<PARAMETERS extends z.ZodTypeAny = any, RESULT = any> {
  description?: string;
  /**
  The schema of the input that the tool expects. The language model will use this to generate the input.
+ It is also used to validate the output of the language model.
  Use descriptions to make the input understandable for the language model.
  */
  parameters: PARAMETERS;
  /**
- An optional execute function for the actual execution function of the tool.
+ An async function that is called with the arguments from the tool call and produces a result.
  If not provided, the tool will not be executed automatically.
  */
  execute?: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
@@ -615,7 +616,7 @@ Generate a text and call tools for a given prompt using a language model.
  This function does not stream the output. If you want to stream the output, use `streamText` instead.

  @param model - The language model to use.
- @param tools - The tools that the model can call. The model needs to support calling tools.
+ @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.

  @param system - A system message that will be part of the prompt.
  @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
@@ -720,7 +721,7 @@ Generate a text and call tools for a given prompt using a language model.
  This function streams the output. If you do not want to stream the output, use `generateText` instead.

  @param model - The language model to use.
- @param tools - The tools that the model can call. The model needs to support calling tools.
+ @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.

  @param system - A system message that will be part of the prompt.
  @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
@@ -784,10 +785,18 @@ A result object for accessing different stream types and additional information.
  declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
  private originalStream;
  /**
- Warnings from the model provider (e.g. unsupported settings)
+ Warnings from the model provider (e.g. unsupported settings).
  */
  readonly warnings: CallWarning[] | undefined;
  /**
+ The token usage of the generated text. Resolved when the response is finished.
+ */
+ readonly usage: Promise<TokenUsage>;
+ /**
+ The reason why the generation finished. Resolved when the response is finished.
+ */
+ readonly finishReason: Promise<FinishReason>;
+ /**
  Optional raw response data.
  */
  rawResponse?: {
@@ -1819,6 +1828,42 @@ type InkeepChatResultCallbacks = {
  type InkeepAIStreamCallbacksAndOptions = AIStreamCallbacksAndOptions & InkeepChatResultCallbacks;
  declare function InkeepStream(res: Response, callbacks?: InkeepAIStreamCallbacksAndOptions): ReadableStream;

+ type LangChainImageDetail = 'auto' | 'low' | 'high';
+ type LangChainMessageContentText = {
+ type: 'text';
+ text: string;
+ };
+ type LangChainMessageContentImageUrl = {
+ type: 'image_url';
+ image_url: string | {
+ url: string;
+ detail?: LangChainImageDetail;
+ };
+ };
+ type LangChainMessageContentComplex = LangChainMessageContentText | LangChainMessageContentImageUrl | (Record<string, any> & {
+ type?: 'text' | 'image_url' | string;
+ }) | (Record<string, any> & {
+ type?: never;
+ });
+ type LangChainMessageContent = string | LangChainMessageContentComplex[];
+ type LangChainAIMessageChunk = {
+ content: LangChainMessageContent;
+ };
+ /**
+ Converts the result of a LangChain Expression Language stream invocation to an AIStream.
+ */
+ declare function toAIStream(stream: ReadableStream<LangChainAIMessageChunk>, callbacks?: AIStreamCallbacksAndOptions): ReadableStream<any>;
+
+ declare const langchainAdapter_toAIStream: typeof toAIStream;
+ declare namespace langchainAdapter {
+ export {
+ langchainAdapter_toAIStream as toAIStream,
+ };
+ }
+
+ /**
+ @deprecated Use LangChainAdapter.toAIStream() instead.
+ */
  declare function LangChainStream(callbacks?: AIStreamCallbacksAndOptions): {
  stream: ReadableStream<any>;
  writer: WritableStreamDefaultWriter<any>;
@@ -1983,4 +2028,4 @@ declare function streamToResponse(res: ReadableStream, response: ServerResponse,
  status?: number;
  }): void;
 
- export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, CallWarning, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreTool, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataMessage, DeepPartial, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, LangChainStream, LanguageModel, LogProbs, Message$1 as Message, MistralStream, ObjectStreamPart, ObjectStreamPartInput, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolResultPart, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, generateObject, generateText, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
+ export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, CallWarning, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreTool, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataMessage, DeepPartial, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, Message$1 as Message, MistralStream, ObjectStreamPart, ObjectStreamPartInput, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolResultPart, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, generateObject, generateText, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
package/dist/index.js CHANGED
@@ -52,6 +52,7 @@ __export(streams_exports, {
  InvalidResponseDataError: () => import_provider8.InvalidResponseDataError,
  InvalidToolArgumentsError: () => import_provider8.InvalidToolArgumentsError,
  JSONParseError: () => import_provider8.JSONParseError,
+ LangChainAdapter: () => langchain_adapter_exports,
  LangChainStream: () => LangChainStream,
  LoadAPIKeyError: () => import_provider8.LoadAPIKeyError,
  MistralStream: () => MistralStream,
@@ -755,6 +756,7 @@ function fixJson(input) {
  break;
  }
  case "}": {
+ lastValidIndex = i;
  stack.pop();
  break;
  }
@@ -1486,9 +1488,27 @@ var StreamTextResult = class {
  warnings,
  rawResponse
  }) {
- this.originalStream = stream;
  this.warnings = warnings;
  this.rawResponse = rawResponse;
+ let resolveUsage;
+ this.usage = new Promise((resolve) => {
+ resolveUsage = resolve;
+ });
+ let resolveFinishReason;
+ this.finishReason = new Promise((resolve) => {
+ resolveFinishReason = resolve;
+ });
+ this.originalStream = stream.pipeThrough(
+ new TransformStream({
+ async transform(chunk, controller) {
+ controller.enqueue(chunk);
+ if (chunk.type === "finish") {
+ resolveUsage(chunk.usage);
+ resolveFinishReason(chunk.finishReason);
+ }
+ }
+ })
+ );
  }
  /**
  Split out a new stream from the original stream.
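The constructor change above routes the stream through a pass-through TransformStream so that usage and finish reason can be exposed as promises without consuming the stream. A generic restatement of that pattern in TypeScript (all names here are illustrative, not the package's API):

// Illustrative pattern: surface a value that is only known when a stream
// finishes as a promise, while forwarding every chunk unchanged.
function withCompletion<T, C>(
  source: ReadableStream<T>,
  isFinal: (chunk: T) => C | undefined,
): { stream: ReadableStream<T>; completion: Promise<C> } {
  let resolveCompletion!: (value: C) => void;
  const completion = new Promise<C>((resolve) => {
    resolveCompletion = resolve;
  });

  const stream = source.pipeThrough(
    new TransformStream<T, T>({
      transform(chunk, controller) {
        controller.enqueue(chunk); // pass the chunk through unchanged
        const final = isFinal(chunk);
        if (final !== undefined) resolveCompletion(final);
      },
    }),
  );

  return { stream, completion };
}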
@@ -2445,6 +2465,30 @@ function InkeepStream(res, callbacks) {
  );
  }

+ // streams/langchain-adapter.ts
+ var langchain_adapter_exports = {};
+ __export(langchain_adapter_exports, {
+ toAIStream: () => toAIStream
+ });
+ function toAIStream(stream, callbacks) {
+ return stream.pipeThrough(
+ new TransformStream({
+ transform: async (chunk, controller) => {
+ if (typeof chunk.content === "string") {
+ controller.enqueue(chunk.content);
+ } else {
+ const content = chunk.content;
+ for (const item of content) {
+ if (item.type === "text") {
+ controller.enqueue(item.text);
+ }
+ }
+ }
+ }
+ })
+ ).pipeThrough(createCallbacksTransformer(callbacks)).pipeThrough(createStreamDataTransformer());
+ }
+
  // streams/langchain-stream.ts
  function LangChainStream(callbacks) {
  const stream = new TransformStream();
@@ -3072,6 +3116,7 @@ function streamToResponse(res, response, init) {
  InvalidResponseDataError,
  InvalidToolArgumentsError,
  JSONParseError,
+ LangChainAdapter,
  LangChainStream,
  LoadAPIKeyError,
  MistralStream,