@ai-sdk/openai-compatible 0.2.6 → 1.0.0-canary.1

package/CHANGELOG.md CHANGED
@@ -1,11 +1,25 @@
 # @ai-sdk/openai-compatible
 
-## 0.2.6
+## 1.0.0-canary.1
 
 ### Patch Changes
 
-- Updated dependencies [2c19b9a]
-  - @ai-sdk/provider-utils@2.2.4
+- Updated dependencies [060370c]
+- Updated dependencies [0c0c0b3]
+- Updated dependencies [63d791d]
+  - @ai-sdk/provider-utils@3.0.0-canary.1
+
+## 1.0.0-canary.0
+
+### Major Changes
+
+- d5f588f: AI SDK 5
+
+### Patch Changes
+
+- Updated dependencies [d5f588f]
+  - @ai-sdk/provider-utils@3.0.0-canary.0
+  - @ai-sdk/provider@2.0.0-canary.0
 
 ## 0.2.5
 
package/dist/index.d.mts CHANGED
@@ -1,4 +1,4 @@
-import { LanguageModelV1ProviderMetadata, LanguageModelV1, LanguageModelV1ObjectGenerationMode, EmbeddingModelV1, ImageModelV1, ProviderV1 } from '@ai-sdk/provider';
+import { LanguageModelV2ProviderMetadata, LanguageModelV2, LanguageModelV2ObjectGenerationMode, EmbeddingModelV1, ImageModelV1, ProviderV2 } from '@ai-sdk/provider';
 import { FetchFunction } from '@ai-sdk/provider-utils';
 import { z, ZodSchema } from 'zod';
 
@@ -73,7 +73,7 @@ type MetadataExtractor = {
      */
     extractMetadata: ({ parsedBody, }: {
         parsedBody: unknown;
-    }) => LanguageModelV1ProviderMetadata | undefined;
+    }) => LanguageModelV2ProviderMetadata | undefined;
     /**
      * Creates an extractor for handling streaming responses. The returned object provides
      * methods to process individual chunks and build the final metadata from the accumulated
@@ -96,7 +96,7 @@ type MetadataExtractor = {
          * @returns Provider-specific metadata or undefined if no metadata is available.
          * The metadata should be under a key indicating the provider id.
          */
-        buildMetadata(): LanguageModelV1ProviderMetadata | undefined;
+        buildMetadata(): LanguageModelV2ProviderMetadata | undefined;
     };
 };
 
@@ -115,14 +115,14 @@ type OpenAICompatibleChatConfig = {
     no mode is specified. Should be the mode with the best results for this
     model. `undefined` can be specified if object generation is not supported.
      */
-    defaultObjectGenerationMode?: LanguageModelV1ObjectGenerationMode;
+    defaultObjectGenerationMode?: LanguageModelV2ObjectGenerationMode;
     /**
      * Whether the model supports structured outputs.
      */
     supportsStructuredOutputs?: boolean;
 };
-declare class OpenAICompatibleChatLanguageModel implements LanguageModelV1 {
-    readonly specificationVersion = "v1";
+declare class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
+    readonly specificationVersion = "v2";
     readonly supportsStructuredOutputs: boolean;
     readonly modelId: OpenAICompatibleChatModelId;
     readonly settings: OpenAICompatibleChatSettings;
@@ -134,8 +134,8 @@ declare class OpenAICompatibleChatLanguageModel implements LanguageModelV1 {
     get provider(): string;
     private get providerOptionsName();
     private getArgs;
-    doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
-    doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
+    doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
+    doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
 }
 
 type OpenAICompatibleCompletionModelId = string;
@@ -180,8 +180,8 @@ type OpenAICompatibleCompletionConfig = {
     fetch?: FetchFunction;
     errorStructure?: ProviderErrorStructure<any>;
 };
-declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV1 {
-    readonly specificationVersion = "v1";
+declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV2 {
+    readonly specificationVersion = "v2";
     readonly defaultObjectGenerationMode: undefined;
     readonly modelId: OpenAICompatibleCompletionModelId;
     readonly settings: OpenAICompatibleCompletionSettings;
@@ -192,8 +192,8 @@ declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV1
     get provider(): string;
     private get providerOptionsName();
     private getArgs;
-    doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
-    doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
+    doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
+    doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
 }
 
 type OpenAICompatibleEmbeddingModelId = string;
@@ -277,11 +277,11 @@ declare class OpenAICompatibleImageModel implements ImageModelV1 {
     doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV1['doGenerate']>>>;
 }
 
-interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends Omit<ProviderV1, 'imageModel'> {
-    (modelId: CHAT_MODEL_IDS, settings?: OpenAICompatibleChatSettings): LanguageModelV1;
-    languageModel(modelId: CHAT_MODEL_IDS, settings?: OpenAICompatibleChatSettings): LanguageModelV1;
-    chatModel(modelId: CHAT_MODEL_IDS, settings?: OpenAICompatibleChatSettings): LanguageModelV1;
-    completionModel(modelId: COMPLETION_MODEL_IDS, settings?: OpenAICompatibleCompletionSettings): LanguageModelV1;
+interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends Omit<ProviderV2, 'imageModel'> {
+    (modelId: CHAT_MODEL_IDS, settings?: OpenAICompatibleChatSettings): LanguageModelV2;
+    languageModel(modelId: CHAT_MODEL_IDS, settings?: OpenAICompatibleChatSettings): LanguageModelV2;
+    chatModel(modelId: CHAT_MODEL_IDS, settings?: OpenAICompatibleChatSettings): LanguageModelV2;
+    completionModel(modelId: COMPLETION_MODEL_IDS, settings?: OpenAICompatibleCompletionSettings): LanguageModelV2;
     textEmbeddingModel(modelId: EMBEDDING_MODEL_IDS, settings?: OpenAICompatibleEmbeddingSettings): EmbeddingModelV1<string>;
     imageModel(modelId: IMAGE_MODEL_IDS, settings?: OpenAICompatibleImageSettings): ImageModelV1;
 }
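
The declaration changes above mean that every model returned by an OpenAICompatibleProvider is now typed against the V2 specification. A minimal consumer sketch in TypeScript, assuming the createOpenAICompatible factory and its name/baseURL options carry over unchanged from the 0.2.x API (the factory itself is not shown in this diff):

// Sketch only: `createOpenAICompatible` and its option names are assumed from the
// 0.2.x API; they are not part of this diff.
import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
import type { LanguageModelV2 } from '@ai-sdk/provider';

const provider = createOpenAICompatible({
  name: 'example',                       // assumed option
  baseURL: 'https://api.example.com/v1', // assumed option
});

// chatModel / languageModel / completionModel now return LanguageModelV2,
// matching the OpenAICompatibleProvider interface above.
const model: LanguageModelV2 = provider.chatModel('example-model-id');
console.log(model.specificationVersion); // "v2"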
package/dist/index.d.ts CHANGED
@@ -1,4 +1,4 @@
-import { LanguageModelV1ProviderMetadata, LanguageModelV1, LanguageModelV1ObjectGenerationMode, EmbeddingModelV1, ImageModelV1, ProviderV1 } from '@ai-sdk/provider';
+import { LanguageModelV2ProviderMetadata, LanguageModelV2, LanguageModelV2ObjectGenerationMode, EmbeddingModelV1, ImageModelV1, ProviderV2 } from '@ai-sdk/provider';
 import { FetchFunction } from '@ai-sdk/provider-utils';
 import { z, ZodSchema } from 'zod';
 
@@ -73,7 +73,7 @@ type MetadataExtractor = {
      */
     extractMetadata: ({ parsedBody, }: {
         parsedBody: unknown;
-    }) => LanguageModelV1ProviderMetadata | undefined;
+    }) => LanguageModelV2ProviderMetadata | undefined;
     /**
      * Creates an extractor for handling streaming responses. The returned object provides
      * methods to process individual chunks and build the final metadata from the accumulated
@@ -96,7 +96,7 @@ type MetadataExtractor = {
          * @returns Provider-specific metadata or undefined if no metadata is available.
          * The metadata should be under a key indicating the provider id.
          */
-        buildMetadata(): LanguageModelV1ProviderMetadata | undefined;
+        buildMetadata(): LanguageModelV2ProviderMetadata | undefined;
     };
 };
 
@@ -115,14 +115,14 @@ type OpenAICompatibleChatConfig = {
     no mode is specified. Should be the mode with the best results for this
     model. `undefined` can be specified if object generation is not supported.
      */
-    defaultObjectGenerationMode?: LanguageModelV1ObjectGenerationMode;
+    defaultObjectGenerationMode?: LanguageModelV2ObjectGenerationMode;
     /**
      * Whether the model supports structured outputs.
      */
     supportsStructuredOutputs?: boolean;
 };
-declare class OpenAICompatibleChatLanguageModel implements LanguageModelV1 {
-    readonly specificationVersion = "v1";
+declare class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
+    readonly specificationVersion = "v2";
     readonly supportsStructuredOutputs: boolean;
     readonly modelId: OpenAICompatibleChatModelId;
     readonly settings: OpenAICompatibleChatSettings;
@@ -134,8 +134,8 @@ declare class OpenAICompatibleChatLanguageModel implements LanguageModelV1 {
     get provider(): string;
     private get providerOptionsName();
     private getArgs;
-    doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
-    doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
+    doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
+    doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
 }
 
 type OpenAICompatibleCompletionModelId = string;
@@ -180,8 +180,8 @@ type OpenAICompatibleCompletionConfig = {
     fetch?: FetchFunction;
     errorStructure?: ProviderErrorStructure<any>;
 };
-declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV1 {
-    readonly specificationVersion = "v1";
+declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV2 {
+    readonly specificationVersion = "v2";
     readonly defaultObjectGenerationMode: undefined;
     readonly modelId: OpenAICompatibleCompletionModelId;
     readonly settings: OpenAICompatibleCompletionSettings;
@@ -192,8 +192,8 @@ declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV1
     get provider(): string;
     private get providerOptionsName();
     private getArgs;
-    doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
-    doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
+    doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
+    doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
 }
 
 type OpenAICompatibleEmbeddingModelId = string;
@@ -277,11 +277,11 @@ declare class OpenAICompatibleImageModel implements ImageModelV1 {
     doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV1['doGenerate']>>>;
 }
 
-interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends Omit<ProviderV1, 'imageModel'> {
-    (modelId: CHAT_MODEL_IDS, settings?: OpenAICompatibleChatSettings): LanguageModelV1;
-    languageModel(modelId: CHAT_MODEL_IDS, settings?: OpenAICompatibleChatSettings): LanguageModelV1;
-    chatModel(modelId: CHAT_MODEL_IDS, settings?: OpenAICompatibleChatSettings): LanguageModelV1;
-    completionModel(modelId: COMPLETION_MODEL_IDS, settings?: OpenAICompatibleCompletionSettings): LanguageModelV1;
+interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends Omit<ProviderV2, 'imageModel'> {
+    (modelId: CHAT_MODEL_IDS, settings?: OpenAICompatibleChatSettings): LanguageModelV2;
+    languageModel(modelId: CHAT_MODEL_IDS, settings?: OpenAICompatibleChatSettings): LanguageModelV2;
+    chatModel(modelId: CHAT_MODEL_IDS, settings?: OpenAICompatibleChatSettings): LanguageModelV2;
+    completionModel(modelId: COMPLETION_MODEL_IDS, settings?: OpenAICompatibleCompletionSettings): LanguageModelV2;
    textEmbeddingModel(modelId: EMBEDDING_MODEL_IDS, settings?: OpenAICompatibleEmbeddingSettings): EmbeddingModelV1<string>;
    imageModel(modelId: IMAGE_MODEL_IDS, settings?: OpenAICompatibleImageSettings): ImageModelV1;
 }
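
Both declaration files also retype MetadataExtractor so that provider-specific metadata flows through LanguageModelV2ProviderMetadata. A rough sketch of an extractor under the new typing; only extractMetadata and buildMetadata appear in this diff, so the createStreamExtractor and processChunk member names used below are assumptions:

// Sketch only: member names other than extractMetadata/buildMetadata are assumed.
import type { LanguageModelV2ProviderMetadata } from '@ai-sdk/provider';

const exampleExtractor = {
  extractMetadata: ({ parsedBody }: { parsedBody: unknown }): LanguageModelV2ProviderMetadata | undefined => {
    const usage = (parsedBody as { usage?: Record<string, number> } | null)?.usage;
    // Metadata is returned under a key indicating the provider id, per the doc comments above.
    return usage ? { example: { usage } } : undefined;
  },
  createStreamExtractor: () => {                // assumed member name
    let lastUsage: Record<string, number> | undefined;
    return {
      processChunk: (parsedChunk: unknown) => { // assumed member name
        const usage = (parsedChunk as { usage?: Record<string, number> } | null)?.usage;
        if (usage != null) lastUsage = usage;
      },
      buildMetadata: (): LanguageModelV2ProviderMetadata | undefined =>
        lastUsage ? { example: { usage: lastUsage } } : undefined,
    };
  },
};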
package/dist/index.js CHANGED
@@ -249,7 +249,7 @@ function prepareTools({
 var OpenAICompatibleChatLanguageModel = class {
   // type inferred via constructor
   constructor(modelId, settings, config) {
-    this.specificationVersion = "v1";
+    this.specificationVersion = "v2";
     var _a, _b;
     this.modelId = modelId;
     this.settings = settings;
@@ -891,7 +891,7 @@ ${user}:`]
 var OpenAICompatibleCompletionLanguageModel = class {
   // type inferred via constructor
   constructor(modelId, settings, config) {
-    this.specificationVersion = "v1";
+    this.specificationVersion = "v2";
     this.defaultObjectGenerationMode = void 0;
     var _a;
     this.modelId = modelId;
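
The compiled output mirrors the type-level change: both constructors now set specificationVersion to "v2" instead of "v1". A rough sketch of direct instantiation by a downstream provider; the constructor shape (modelId, settings, config) is visible above, but the config fields used here (provider, url, headers) are assumed from earlier releases and are not shown in this diff:

// Sketch only: the OpenAICompatibleChatConfig fields below are assumptions.
import { OpenAICompatibleChatLanguageModel } from '@ai-sdk/openai-compatible';

const chatModel = new OpenAICompatibleChatLanguageModel(
  'example-model-id',
  {}, // OpenAICompatibleChatSettings
  {
    provider: 'example.chat',                               // assumed field
    url: ({ path }) => `https://api.example.com/v1${path}`, // assumed field
    headers: () => ({}),                                    // assumed field
  },
);

console.log(chatModel.specificationVersion); // "v2" (was "v1" in 0.2.6)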