ai 4.0.5 → 4.0.6

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,12 @@
  # ai
 
+ ## 4.0.6
+
+ ### Patch Changes
+
+ - fc18132: feat (ai/core): experimental output for generateText
+ - 2779f6d: fix (ai/core): do not send maxRetries into providers
+
  ## 4.0.5
 
  ### Patch Changes
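
Based on the declarations added in this release (see the `.d.ts` diffs below), here is a minimal sketch of the new experimental output feature. The provider, model id, and schema are illustrative assumptions, not part of this package's diff:

```ts
import { generateText, Output } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumption: any LanguageModel provider should work here
import { z } from 'zod';

// Sketch: request a structured result via the new experimental_output option.
const { experimental_output } = await generateText({
  model: openai('gpt-4o-mini'), // hypothetical model choice
  prompt: 'Extract the city and country from: "Berlin is the capital of Germany."',
  experimental_output: Output.object({
    schema: z.object({ city: z.string(), country: z.string() }),
  }),
});

// experimental_output is typed from the schema: { city: string; country: string }
console.log(experimental_output.city, experimental_output.country);
```
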
package/dist/index.d.mts CHANGED
@@ -221,7 +221,7 @@ Embed a value using an embedding model. The type of the value is defined by the
 
  @returns A result object that contains the embedding, the value, and additional information.
  */
- declare function embed<VALUE>({ model, value, maxRetries, abortSignal, headers, experimental_telemetry: telemetry, }: {
+ declare function embed<VALUE>({ model, value, maxRetries: maxRetriesArg, abortSignal, headers, experimental_telemetry: telemetry, }: {
  /**
  The embedding model to use.
  */
@@ -286,7 +286,7 @@ has a limit on how many embeddings can be generated in a single call.
 
  @returns A result object that contains the embeddings, the value, and additional information.
  */
- declare function embedMany<VALUE>({ model, values, maxRetries, abortSignal, headers, experimental_telemetry: telemetry, }: {
+ declare function embedMany<VALUE>({ model, values, maxRetries: maxRetriesArg, abortSignal, headers, experimental_telemetry: telemetry, }: {
  /**
  The embedding model to use.
  */
@@ -626,11 +626,11 @@ type Prompt = {
  /**
  The result of a `generateObject` call.
  */
- interface GenerateObjectResult<T> {
+ interface GenerateObjectResult<OBJECT> {
  /**
  The generated object (typed according to the schema).
  */
- readonly object: T;
+ readonly object: OBJECT;
  /**
  The reason why the generation finished.
  */
@@ -1405,12 +1405,16 @@ type StepResult<TOOLS extends Record<string, CoreTool>> = {
  The result of a `generateText` call.
  It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
  */
- interface GenerateTextResult<TOOLS extends Record<string, CoreTool>> {
+ interface GenerateTextResult<TOOLS extends Record<string, CoreTool>, OUTPUT> {
  /**
  The generated text.
  */
  readonly text: string;
  /**
+ The generated output.
+ */
+ readonly experimental_output: OUTPUT;
+ /**
  The tool calls that were made during the generation.
  */
  readonly toolCalls: ToolCallArray<TOOLS>;
@@ -1469,6 +1473,35 @@ interface GenerateTextResult<TOOLS extends Record<string, CoreTool>> {
  readonly experimental_providerMetadata: ProviderMetadata | undefined;
  }
 
+ interface Output<OUTPUT> {
+ readonly type: 'object' | 'text';
+ injectIntoSystemPrompt(options: {
+ system: string | undefined;
+ model: LanguageModel;
+ }): string | undefined;
+ responseFormat: (options: {
+ model: LanguageModel;
+ }) => LanguageModelV1CallOptions['responseFormat'];
+ parseOutput(options: {
+ text: string;
+ }): OUTPUT;
+ }
+ declare const text: () => Output<string>;
+ declare const object: <OUTPUT>({ schema: inputSchema, }: {
+ schema: z.Schema<OUTPUT, z.ZodTypeDef, any> | Schema<OUTPUT>;
+ }) => Output<OUTPUT>;
+
+ type output_Output<OUTPUT> = Output<OUTPUT>;
+ declare const output_object: typeof object;
+ declare const output_text: typeof text;
+ declare namespace output {
+ export {
+ output_Output as Output,
+ output_object as object,
+ output_text as text,
+ };
+ }
+
  /**
  Generate a text and call tools for a given prompt using a language model.
 
@@ -1515,7 +1548,7 @@ If set and supported by the model, calls will generate deterministic results.
  @returns
  A result object that contains the generated text, the results of the tool calls, and additional information.
  */
- declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxSteps, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_activeTools: activeTools, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
+ declare function generateText<TOOLS extends Record<string, CoreTool>, OUTPUT = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, maxSteps, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_activeTools: activeTools, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -1557,6 +1590,7 @@ Limits the tools that are available for the model to call without
  changing the tool call and result types in the result.
  */
  experimental_activeTools?: Array<keyof TOOLS>;
+ experimental_output?: Output<OUTPUT>;
  /**
  Callback that is called when each step (LLM call) is finished, including intermediate steps.
  */
@@ -1568,7 +1602,7 @@ changing the tool call and result types in the result.
  generateId?: () => string;
  currentDate?: () => Date;
  };
- }): Promise<GenerateTextResult<TOOLS>>;
+ }): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
 
  /**
  * A stream wrapper to send custom JSON-encoded data back to the client.
@@ -2255,4 +2289,4 @@ declare namespace llamaindexAdapter {
  };
  }
 
- export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolContent, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createStreamDataTransformer, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, experimental_wrapLanguageModel, generateObject, generateText, streamObject, streamText, tool };
+ export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolContent, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createStreamDataTransformer, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, experimental_wrapLanguageModel, generateObject, generateText, streamObject, streamText, tool };
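
For reference, a hedged sketch of the plain-text variant of the same API (provider and model id are assumptions): `Output.text()` keeps ordinary text generation but types `result.experimental_output` as `string`, while omitting `experimental_output` leaves the `OUTPUT` type parameter at its default of `never`.

```ts
import { generateText, Output } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumption: provider choice is illustrative

const result = await generateText({
  model: openai('gpt-4o-mini'), // hypothetical model id
  prompt: 'Write a one-sentence tagline for a coffee shop.',
  // Output.text() types result.experimental_output as string.
  experimental_output: Output.text(),
});

const tagline: string = result.experimental_output;
console.log(tagline);
```
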
package/dist/index.d.ts CHANGED
@@ -221,7 +221,7 @@ Embed a value using an embedding model. The type of the value is defined by the
 
  @returns A result object that contains the embedding, the value, and additional information.
  */
- declare function embed<VALUE>({ model, value, maxRetries, abortSignal, headers, experimental_telemetry: telemetry, }: {
+ declare function embed<VALUE>({ model, value, maxRetries: maxRetriesArg, abortSignal, headers, experimental_telemetry: telemetry, }: {
  /**
  The embedding model to use.
  */
@@ -286,7 +286,7 @@ has a limit on how many embeddings can be generated in a single call.
 
  @returns A result object that contains the embeddings, the value, and additional information.
  */
- declare function embedMany<VALUE>({ model, values, maxRetries, abortSignal, headers, experimental_telemetry: telemetry, }: {
+ declare function embedMany<VALUE>({ model, values, maxRetries: maxRetriesArg, abortSignal, headers, experimental_telemetry: telemetry, }: {
  /**
  The embedding model to use.
  */
@@ -626,11 +626,11 @@ type Prompt = {
  /**
  The result of a `generateObject` call.
  */
- interface GenerateObjectResult<T> {
+ interface GenerateObjectResult<OBJECT> {
  /**
  The generated object (typed according to the schema).
  */
- readonly object: T;
+ readonly object: OBJECT;
  /**
  The reason why the generation finished.
  */
@@ -1405,12 +1405,16 @@ type StepResult<TOOLS extends Record<string, CoreTool>> = {
  The result of a `generateText` call.
  It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
  */
- interface GenerateTextResult<TOOLS extends Record<string, CoreTool>> {
+ interface GenerateTextResult<TOOLS extends Record<string, CoreTool>, OUTPUT> {
  /**
  The generated text.
  */
  readonly text: string;
  /**
+ The generated output.
+ */
+ readonly experimental_output: OUTPUT;
+ /**
  The tool calls that were made during the generation.
  */
  readonly toolCalls: ToolCallArray<TOOLS>;
@@ -1469,6 +1473,35 @@ interface GenerateTextResult<TOOLS extends Record<string, CoreTool>> {
  readonly experimental_providerMetadata: ProviderMetadata | undefined;
  }
 
+ interface Output<OUTPUT> {
+ readonly type: 'object' | 'text';
+ injectIntoSystemPrompt(options: {
+ system: string | undefined;
+ model: LanguageModel;
+ }): string | undefined;
+ responseFormat: (options: {
+ model: LanguageModel;
+ }) => LanguageModelV1CallOptions['responseFormat'];
+ parseOutput(options: {
+ text: string;
+ }): OUTPUT;
+ }
+ declare const text: () => Output<string>;
+ declare const object: <OUTPUT>({ schema: inputSchema, }: {
+ schema: z.Schema<OUTPUT, z.ZodTypeDef, any> | Schema<OUTPUT>;
+ }) => Output<OUTPUT>;
+
+ type output_Output<OUTPUT> = Output<OUTPUT>;
+ declare const output_object: typeof object;
+ declare const output_text: typeof text;
+ declare namespace output {
+ export {
+ output_Output as Output,
+ output_object as object,
+ output_text as text,
+ };
+ }
+
  /**
  Generate a text and call tools for a given prompt using a language model.
 
@@ -1515,7 +1548,7 @@ If set and supported by the model, calls will generate deterministic results.
  @returns
  A result object that contains the generated text, the results of the tool calls, and additional information.
  */
- declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxSteps, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_activeTools: activeTools, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
+ declare function generateText<TOOLS extends Record<string, CoreTool>, OUTPUT = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, maxSteps, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_activeTools: activeTools, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -1557,6 +1590,7 @@ Limits the tools that are available for the model to call without
  changing the tool call and result types in the result.
  */
  experimental_activeTools?: Array<keyof TOOLS>;
+ experimental_output?: Output<OUTPUT>;
  /**
  Callback that is called when each step (LLM call) is finished, including intermediate steps.
  */
@@ -1568,7 +1602,7 @@ changing the tool call and result types in the result.
  generateId?: () => string;
  currentDate?: () => Date;
  };
- }): Promise<GenerateTextResult<TOOLS>>;
+ }): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
 
  /**
  * A stream wrapper to send custom JSON-encoded data back to the client.
@@ -2255,4 +2289,4 @@ declare namespace llamaindexAdapter {
  };
  }
 
- export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolContent, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createStreamDataTransformer, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, experimental_wrapLanguageModel, generateObject, generateText, streamObject, streamText, tool };
+ export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolContent, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createStreamDataTransformer, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, experimental_wrapLanguageModel, generateObject, generateText, streamObject, streamText, tool };