@ai-sdk/provider 0.0.4 → 0.0.6

This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
package/dist/index.d.ts CHANGED
@@ -1,5 +1,76 @@
  import { JSONSchema7 } from 'json-schema';

+ /**
+ An embedding is a vector, i.e. an array of numbers.
+ It is e.g. used to represent a text as a vector of word embeddings.
+ */
+ type EmbeddingModelV1Embedding = Array<number>;
+
+ /**
+ Experimental: Specification for an embedding model that implements the embedding model
+ interface version 1.
+
+ VALUE is the type of the values that the model can embed.
+ This will allow us to go beyond text embeddings in the future,
+ e.g. to support image embeddings
+ */
+ type EmbeddingModelV1<VALUE> = {
+ /**
+ The embedding model must specify which embedding model interface
+ version it implements. This will allow us to evolve the embedding
+ model interface and retain backwards compatibility. The different
+ implementation versions can be handled as a discriminated union
+ on our side.
+ */
+ readonly specificationVersion: 'v1';
+ /**
+ Name of the provider for logging purposes.
+ */
+ readonly provider: string;
+ /**
+ Provider-specific model ID for logging purposes.
+ */
+ readonly modelId: string;
+ /**
+ Limit of how many embeddings can be generated in a single API call.
+ */
+ readonly maxEmbeddingsPerCall: number | undefined;
+ /**
+ True if the model can handle multiple embedding calls in parallel.
+ */
+ readonly supportsParallelCalls: boolean;
+ /**
+ Generates a list of embeddings for the given input text.
+
+ Naming: "do" prefix to prevent accidental direct usage of the method
+ by the user.
+ */
+ doEmbed(options: {
+ /**
+ List of values to embed.
+ */
+ values: Array<VALUE>;
+ /**
+ Abort signal for cancelling the operation.
+ */
+ abortSignal?: AbortSignal;
+ }): PromiseLike<{
+ /**
+ Generated embeddings. They are in the same order as the input values.
+ */
+ embeddings: Array<EmbeddingModelV1Embedding>;
+ /**
+ Optional raw response information for debugging purposes.
+ */
+ rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ }>;
+ };
+
  declare class APICallError extends Error {
  readonly url: string;
  readonly requestBodyValues: unknown;
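For orientation, the `EmbeddingModelV1` interface added in the hunk above is the contract that embedding providers implement as of 0.0.6. The following is a minimal sketch of a conforming implementation, not taken from the package; the provider name, model ID, and the toy embedding logic are made up for illustration.

import type { EmbeddingModelV1 } from '@ai-sdk/provider';

// Toy in-memory model: embeds each string as a fixed-length vector of
// character codes. Illustrates the interface shape only.
const toyEmbeddingModel: EmbeddingModelV1<string> = {
  specificationVersion: 'v1',
  provider: 'example-provider', // hypothetical, used for logging only
  modelId: 'toy-embedder',      // hypothetical, used for logging only
  maxEmbeddingsPerCall: 16,
  supportsParallelCalls: true,

  async doEmbed({ values }) {
    return {
      // One embedding per input value, in the same order.
      embeddings: values.map(value =>
        Array.from({ length: 8 }, (_, i) =>
          value.length === 0 ? 0 : value.charCodeAt(i % value.length),
        ),
      ),
      rawResponse: { headers: {} },
    };
  },
};

`maxEmbeddingsPerCall` and `supportsParallelCalls` exist so that callers can split large batches and decide whether to issue the resulting calls concurrently.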
@@ -167,10 +238,43 @@ declare class LoadAPIKeyError extends Error {
  };
  }

+ declare class LoadSettingError extends Error {
+ constructor({ message }: {
+ message: string;
+ });
+ static isLoadSettingError(error: unknown): error is LoadSettingError;
+ toJSON(): {
+ name: string;
+ message: string;
+ };
+ }
+
+ /**
+ Thrown when the AI provider fails to generate any content.
+ */
+ declare class NoContentGeneratedError extends Error {
+ readonly cause: unknown;
+ constructor({ message, }?: {
+ message?: string;
+ });
+ static isNoContentGeneratedError(error: unknown): error is NoContentGeneratedError;
+ toJSON(): {
+ name: string;
+ cause: unknown;
+ message: string;
+ stack: string | undefined;
+ };
+ }
+
+ /**
+ Thrown when the AI provider fails to generate a parsable object.
+ */
  declare class NoObjectGeneratedError extends Error {
  readonly cause: unknown;
- constructor();
- static isNoTextGeneratedError(error: unknown): error is NoObjectGeneratedError;
+ constructor({ message }?: {
+ message?: string;
+ });
+ static isNoObjectGeneratedError(error: unknown): error is NoObjectGeneratedError;
  toJSON(): {
  name: string;
  cause: unknown;
@@ -217,6 +321,29 @@ declare class RetryError extends Error {
  };
  }

+ declare class TooManyEmbeddingValuesForCallError extends Error {
+ readonly provider: string;
+ readonly modelId: string;
+ readonly maxEmbeddingsPerCall: number;
+ readonly values: Array<unknown>;
+ constructor(options: {
+ provider: string;
+ modelId: string;
+ maxEmbeddingsPerCall: number;
+ values: Array<unknown>;
+ });
+ static isInvalidPromptError(error: unknown): error is TooManyEmbeddingValuesForCallError;
+ toJSON(): {
+ name: string;
+ message: string;
+ stack: string | undefined;
+ provider: string;
+ modelId: string;
+ maxEmbeddingsPerCall: number;
+ values: unknown[];
+ };
+ }
+
  /**
  A tool has a name, a description, and a set of parameters.

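The new `TooManyEmbeddingValuesForCallError` is the counterpart of `maxEmbeddingsPerCall` on the embedding model interface: a provider is expected to reject a batch that exceeds its limit rather than silently truncate it. A rough sketch of such a guard follows; the provider and model names are placeholders, not part of the package.

import { TooManyEmbeddingValuesForCallError } from '@ai-sdk/provider';

// Hypothetical check at the top of a provider's doEmbed implementation.
function assertEmbeddingBatchSize(values: unknown[], maxEmbeddingsPerCall: number): void {
  if (values.length > maxEmbeddingsPerCall) {
    throw new TooManyEmbeddingValuesForCallError({
      provider: 'example-provider', // placeholder
      modelId: 'toy-embedder',      // placeholder
      maxEmbeddingsPerCall,
      values,
    });
  }
}

Note that in this release the static type guard is published as `isInvalidPromptError`, even though it checks for the `AI_TooManyEmbeddingValuesForCallError` name (see the implementation in `dist/index.js` below).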
@@ -233,7 +360,15 @@ type LanguageModelV1FunctionTool = {
  The name of the tool. Unique within this model call.
  */
  name: string;
+ /**
+ A description of the tool. The language model uses this to understand the
+ tool's purpose and to provide better completion suggestions.
+ */
  description?: string;
+ /**
+ The parameters that the tool expects. The language model uses this to
+ understand the tool's input requirements and to provide matching suggestions.
+ */
  parameters: JSONSchema7;
  };

@@ -309,38 +444,38 @@ declare class UnsupportedJSONSchemaError extends Error {

  type LanguageModelV1CallSettings = {
  /**
- * Maximum number of tokens to generate.
+ Maximum number of tokens to generate.
  */
  maxTokens?: number;
  /**
- * Temperature setting.
- *
- * It is recommended to set either `temperature` or `topP`, but not both.
+ Temperature setting.
+
+ It is recommended to set either `temperature` or `topP`, but not both.
  */
  temperature?: number;
  /**
- * Nucleus sampling.
- *
- * It is recommended to set either `temperature` or `topP`, but not both.
+ Nucleus sampling.
+
+ It is recommended to set either `temperature` or `topP`, but not both.
  */
  topP?: number;
  /**
- * Presence penalty setting. It affects the likelihood of the model to
- * repeat information that is already in the prompt.
+ Presence penalty setting. It affects the likelihood of the model to
+ repeat information that is already in the prompt.
  */
  presencePenalty?: number;
  /**
- * Frequency penalty setting. It affects the likelihood of the model
- * to repeatedly use the same words or phrases.
+ Frequency penalty setting. It affects the likelihood of the model
+ to repeatedly use the same words or phrases.
  */
  frequencyPenalty?: number;
  /**
- * The seed (integer) to use for random sampling. If set and supported
- * by the model, calls will generate deterministic results.
+ The seed (integer) to use for random sampling. If set and supported
+ by the model, calls will generate deterministic results.
  */
  seed?: number;
  /**
- * Abort signal for cancelling the operation.
+ Abort signal for cancelling the operation.
  */
  abortSignal?: AbortSignal;
  };
@@ -435,19 +570,19 @@ interface LanguageModelV1ToolResultPart {

  type LanguageModelV1CallOptions = LanguageModelV1CallSettings & {
  /**
- * Whether the user provided the input as messages or as
- * a prompt. This can help guide non-chat models in the
- * expansion, bc different expansions can be needed for
- * chat/non-chat use cases.
+ Whether the user provided the input as messages or as
+ a prompt. This can help guide non-chat models in the
+ expansion, bc different expansions can be needed for
+ chat/non-chat use cases.
  */
  inputFormat: 'messages' | 'prompt';
  /**
- * The mode affects the behavior of the language model. It is required to
- * support provider-independent streaming and generation of structured objects.
- * The model can take this information and e.g. configure json mode, the correct
- * low level grammar, etc. It can also be used to optimize the efficiency of the
- * streaming, e.g. tool-delta stream parts are only needed in the
- * object-tool mode.
+ The mode affects the behavior of the language model. It is required to
+ support provider-independent streaming and generation of structured objects.
+ The model can take this information and e.g. configure json mode, the correct
+ low level grammar, etc. It can also be used to optimize the efficiency of the
+ streaming, e.g. tool-delta stream parts are only needed in the
+ object-tool mode.
  */
  mode: {
  type: 'regular';
@@ -462,19 +597,19 @@ type LanguageModelV1CallOptions = LanguageModelV1CallSettings & {
  tool: LanguageModelV1FunctionTool;
  };
  /**
- * A language mode prompt is a standardized prompt type.
- *
- * Note: This is **not** the user-facing prompt. The AI SDK methods will map the
- * user-facing prompt types such as chat or instruction prompts to this format.
- * That approach allows us to evolve the user facing prompts without breaking
- * the language model interface.
+ A language mode prompt is a standardized prompt type.
+
+ Note: This is **not** the user-facing prompt. The AI SDK methods will map the
+ user-facing prompt types such as chat or instruction prompts to this format.
+ That approach allows us to evolve the user facing prompts without breaking
+ the language model interface.
  */
  prompt: LanguageModelV1Prompt;
  };

  /**
- * Warning from the model provider for this call. The call will proceed, but e.g.
- * some settings might not be supported, which can lead to suboptimal results.
+ Warning from the model provider for this call. The call will proceed, but e.g.
+ some settings might not be supported, which can lead to suboptimal results.
  */
  type LanguageModelV1CallWarning = {
  type: 'unsupported-setting';
@@ -502,8 +637,8 @@ type LanguageModelV1FunctionToolCall = {
  toolCallId: string;
  toolName: string;
  /**
- * Stringified JSON object with the tool call arguments. Must match the
- * parameters schema of the tool.
+ Stringified JSON object with the tool call arguments. Must match the
+ parameters schema of the tool.
  */
  args: string;
  };
@@ -521,125 +656,124 @@ type LanguageModelV1LogProbs = Array<{
  }>;

  /**
- * Experimental: Specification for a language model that implements the language model
- * interface version 1.
+ Specification for a language model that implements the language model interface version 1.
  */
  type LanguageModelV1 = {
  /**
- * The language model must specify which language model interface
- * version it implements. This will allow us to evolve the language
- * model interface and retain backwards compatibility. The different
- * implementation versions can be handled as a discriminated union
- * on our side.
+ The language model must specify which language model interface
+ version it implements. This will allow us to evolve the language
+ model interface and retain backwards compatibility. The different
+ implementation versions can be handled as a discriminated union
+ on our side.
  */
  readonly specificationVersion: 'v1';
  /**
- * Name of the provider for logging purposes.
+ Name of the provider for logging purposes.
  */
  readonly provider: string;
  /**
- * Provider-specific model ID for logging purposes.
+ Provider-specific model ID for logging purposes.
  */
  readonly modelId: string;
  /**
- * Default object generation mode that should be used with this model when
- * no mode is specified. Should be the mode with the best results for this
- * model. `undefined` can be returned if object generation is not supported.
- *
- * This is needed to generate the best objects possible w/o requiring the
- * user to explicitly specify the object generation mode.
+ Default object generation mode that should be used with this model when
+ no mode is specified. Should be the mode with the best results for this
+ model. `undefined` can be returned if object generation is not supported.
+
+ This is needed to generate the best objects possible w/o requiring the
+ user to explicitly specify the object generation mode.
  */
  readonly defaultObjectGenerationMode: 'json' | 'tool' | 'grammar' | undefined;
  /**
- * Generates a language model output (non-streaming).
- *
- * Naming: "do" prefix to prevent accidental direct usage of the method
- * by the user.
+ Generates a language model output (non-streaming).
+
+ Naming: "do" prefix to prevent accidental direct usage of the method
+ by the user.
  */
  doGenerate(options: LanguageModelV1CallOptions): PromiseLike<{
  /**
- * Text that the model has generated. Can be undefined if the model
- * has only generated tool calls.
+ Text that the model has generated. Can be undefined if the model
+ has only generated tool calls.
  */
  text?: string;
  /**
- * Tool calls that the model has generated. Can be undefined if the
- * model has only generated text.
+ Tool calls that the model has generated. Can be undefined if the
+ model has only generated text.
  */
  toolCalls?: Array<LanguageModelV1FunctionToolCall>;
  /**
- * Finish reason.
+ Finish reason.
  */
  finishReason: LanguageModelV1FinishReason;
  /**
- * Usage information.
+ Usage information.
  */
  usage: {
  promptTokens: number;
  completionTokens: number;
  };
  /**
- * Raw prompt and setting information for observability provider integration.
+ Raw prompt and setting information for observability provider integration.
  */
  rawCall: {
  /**
- * Raw prompt after expansion and conversion to the format that the
- * provider uses to send the information to their API.
+ Raw prompt after expansion and conversion to the format that the
+ provider uses to send the information to their API.
  */
  rawPrompt: unknown;
  /**
- * Raw settings that are used for the API call. Includes provider-specific
- * settings.
+ Raw settings that are used for the API call. Includes provider-specific
+ settings.
  */
  rawSettings: Record<string, unknown>;
  };
  /**
- * Optional raw response information for debugging purposes.
+ Optional raw response information for debugging purposes.
  */
  rawResponse?: {
  /**
- * Response headers.
- */
+ Response headers.
+ */
  headers?: Record<string, string>;
  };
  warnings?: LanguageModelV1CallWarning[];
  /**
- * Logprobs for the completion.
- * `undefined` if the mode does not support logprobs or if was not enabled
+ Logprobs for the completion.
+ `undefined` if the mode does not support logprobs or if was not enabled
  */
  logprobs?: LanguageModelV1LogProbs;
  }>;
  /**
- * Generates a language model output (streaming).
- *
- * Naming: "do" prefix to prevent accidental direct usage of the method
- * by the user.
+ Generates a language model output (streaming).
+
+ Naming: "do" prefix to prevent accidental direct usage of the method
+ by the user.
  *
- * @return A stream of higher-level language model output parts.
+ @return A stream of higher-level language model output parts.
  */
  doStream(options: LanguageModelV1CallOptions): PromiseLike<{
  stream: ReadableStream<LanguageModelV1StreamPart>;
  /**
- * Raw prompt and setting information for observability provider integration.
+ Raw prompt and setting information for observability provider integration.
  */
  rawCall: {
  /**
- * Raw prompt after expansion and conversion to the format that the
- * provider uses to send the information to their API.
+ Raw prompt after expansion and conversion to the format that the
+ provider uses to send the information to their API.
  */
  rawPrompt: unknown;
  /**
- * Raw settings that are used for the API call. Includes provider-specific
- * settings.
+ Raw settings that are used for the API call. Includes provider-specific
+ settings.
  */
  rawSettings: Record<string, unknown>;
  };
  /**
- * Optional raw response data.
+ Optional raw response data.
  */
  rawResponse?: {
  /**
- * Response headers.
+ Response headers.
  */
  headers?: Record<string, string>;
  };
@@ -671,4 +805,4 @@ type LanguageModelV1StreamPart = {
  };
  type LanguageModelV1ResponseMetadata = {};

- export { APICallError, EmptyResponseBodyError, InvalidArgumentError, InvalidDataContentError, InvalidPromptError, InvalidResponseDataError, InvalidToolArgumentsError, JSONParseError, type LanguageModelV1, type LanguageModelV1CallOptions, type LanguageModelV1CallWarning, type LanguageModelV1FinishReason, type LanguageModelV1FunctionTool, type LanguageModelV1FunctionToolCall, type LanguageModelV1ImagePart, type LanguageModelV1LogProbs, type LanguageModelV1Message, type LanguageModelV1Prompt, type LanguageModelV1ResponseMetadata, type LanguageModelV1StreamPart, type LanguageModelV1TextPart, type LanguageModelV1ToolCallPart, type LanguageModelV1ToolResultPart, LoadAPIKeyError, NoObjectGeneratedError, NoSuchToolError, RetryError, type RetryErrorReason, ToolCallParseError, TypeValidationError, UnsupportedFunctionalityError, UnsupportedJSONSchemaError };
+ export { APICallError, type EmbeddingModelV1, type EmbeddingModelV1Embedding, EmptyResponseBodyError, InvalidArgumentError, InvalidDataContentError, InvalidPromptError, InvalidResponseDataError, InvalidToolArgumentsError, JSONParseError, type LanguageModelV1, type LanguageModelV1CallOptions, type LanguageModelV1CallWarning, type LanguageModelV1FinishReason, type LanguageModelV1FunctionTool, type LanguageModelV1FunctionToolCall, type LanguageModelV1ImagePart, type LanguageModelV1LogProbs, type LanguageModelV1Message, type LanguageModelV1Prompt, type LanguageModelV1ResponseMetadata, type LanguageModelV1StreamPart, type LanguageModelV1TextPart, type LanguageModelV1ToolCallPart, type LanguageModelV1ToolResultPart, LoadAPIKeyError, LoadSettingError, NoContentGeneratedError, NoObjectGeneratedError, NoSuchToolError, RetryError, type RetryErrorReason, TooManyEmbeddingValuesForCallError, ToolCallParseError, TypeValidationError, UnsupportedFunctionalityError, UnsupportedJSONSchemaError };
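Beyond the added embedding types, the changes above also touch the error surface that consumers see: `NoObjectGeneratedError` gains an optional `message` and its static guard is renamed from `isNoTextGeneratedError` to `isNoObjectGeneratedError`, while `LoadSettingError`, `NoContentGeneratedError`, and `TooManyEmbeddingValuesForCallError` are exported for the first time. A hedged sketch of how calling code might adapt to the rename; the wrapper itself is illustrative, not part of the package.

import {
  NoContentGeneratedError,
  NoObjectGeneratedError,
} from '@ai-sdk/provider';

// `produce` stands in for any call that can surface these provider errors.
async function tryGenerate<T>(produce: () => PromiseLike<T>): Promise<T | undefined> {
  try {
    return await produce();
  } catch (error) {
    // 0.0.4 exposed this guard as NoObjectGeneratedError.isNoTextGeneratedError;
    // 0.0.6 renames it to match the class.
    if (NoObjectGeneratedError.isNoObjectGeneratedError(error)) {
      console.warn('No parsable object generated:', error.message);
      return undefined;
    }
    // New in this range: thrown when the provider generates no content at all.
    if (NoContentGeneratedError.isNoContentGeneratedError(error)) {
      console.warn('No content generated:', error.message);
      return undefined;
    }
    throw error;
  }
}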
package/dist/index.js CHANGED
@@ -29,9 +29,12 @@ __export(src_exports, {
  InvalidToolArgumentsError: () => InvalidToolArgumentsError,
  JSONParseError: () => JSONParseError,
  LoadAPIKeyError: () => LoadAPIKeyError,
+ LoadSettingError: () => LoadSettingError,
+ NoContentGeneratedError: () => NoContentGeneratedError,
  NoObjectGeneratedError: () => NoObjectGeneratedError,
  NoSuchToolError: () => NoSuchToolError,
  RetryError: () => RetryError,
+ TooManyEmbeddingValuesForCallError: () => TooManyEmbeddingValuesForCallError,
  ToolCallParseError: () => ToolCallParseError,
  TypeValidationError: () => TypeValidationError,
  UnsupportedFunctionalityError: () => UnsupportedFunctionalityError,
@@ -286,13 +289,51 @@ var LoadAPIKeyError = class extends Error {
  }
  };

+ // src/errors/load-setting-error.ts
+ var LoadSettingError = class extends Error {
+ constructor({ message }) {
+ super(message);
+ this.name = "AI_LoadSettingError";
+ }
+ static isLoadSettingError(error) {
+ return error instanceof Error && error.name === "AI_LoadSettingError";
+ }
+ toJSON() {
+ return {
+ name: this.name,
+ message: this.message
+ };
+ }
+ };
+
+ // src/errors/no-content-generated-error.ts
+ var NoContentGeneratedError = class extends Error {
+ constructor({
+ message = "No content generated."
+ } = {}) {
+ super(message);
+ this.name = "AI_NoContentGeneratedError";
+ }
+ static isNoContentGeneratedError(error) {
+ return error instanceof Error && error.name === "AI_NoContentGeneratedError";
+ }
+ toJSON() {
+ return {
+ name: this.name,
+ cause: this.cause,
+ message: this.message,
+ stack: this.stack
+ };
+ }
+ };
+
  // src/errors/no-object-generated-error.ts
  var NoObjectGeneratedError = class extends Error {
- constructor() {
- super(`No object generated.`);
+ constructor({ message = "No object generated." } = {}) {
+ super(message);
  this.name = "AI_NoObjectGeneratedError";
  }
- static isNoTextGeneratedError(error) {
+ static isNoObjectGeneratedError(error) {
  return error instanceof Error && error.name === "AI_NoObjectGeneratedError";
  }
  toJSON() {
@@ -358,6 +399,34 @@ var RetryError = class extends Error {
  }
  };

+ // src/errors/too-many-embedding-values-for-call-error.ts
+ var TooManyEmbeddingValuesForCallError = class extends Error {
+ constructor(options) {
+ super(
+ `Too many values for a single embedding call. The ${options.provider} model "${options.modelId}" can only embed up to ${options.maxEmbeddingsPerCall} values per call, but ${options.values.length} values were provided.`
+ );
+ this.name = "AI_TooManyEmbeddingValuesForCallError";
+ this.provider = options.provider;
+ this.modelId = options.modelId;
+ this.maxEmbeddingsPerCall = options.maxEmbeddingsPerCall;
+ this.values = options.values;
+ }
+ static isInvalidPromptError(error) {
+ return error instanceof Error && error.name === "AI_TooManyEmbeddingValuesForCallError" && "provider" in error && typeof error.provider === "string" && "modelId" in error && typeof error.modelId === "string" && "maxEmbeddingsPerCall" in error && typeof error.maxEmbeddingsPerCall === "number" && "values" in error && Array.isArray(error.values);
+ }
+ toJSON() {
+ return {
+ name: this.name,
+ message: this.message,
+ stack: this.stack,
+ provider: this.provider,
+ modelId: this.modelId,
+ maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
+ values: this.values
+ };
+ }
+ };
+
  // src/errors/tool-call-parse-error.ts
  var ToolCallParseError = class extends Error {
  constructor({
@@ -468,9 +537,12 @@ var UnsupportedJSONSchemaError = class extends Error {
  InvalidToolArgumentsError,
  JSONParseError,
  LoadAPIKeyError,
+ LoadSettingError,
+ NoContentGeneratedError,
  NoObjectGeneratedError,
  NoSuchToolError,
  RetryError,
+ TooManyEmbeddingValuesForCallError,
  ToolCallParseError,
  TypeValidationError,
  UnsupportedFunctionalityError,