@ai-sdk/openai-compatible 2.0.0-beta.53 → 2.0.0-beta.55

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,23 @@
  # @ai-sdk/openai-compatible

+ ## 2.0.0-beta.55
+
+ ### Patch Changes
+
+ - 9061dc0: feat: image editing
+ - Updated dependencies [9061dc0]
+   - @ai-sdk/provider-utils@4.0.0-beta.54
+   - @ai-sdk/provider@3.0.0-beta.28
+
+ ## 2.0.0-beta.54
+
+ ### Patch Changes
+
+ - 366f50b: chore(provider): add deprecated textEmbeddingModel and textEmbedding aliases
+ - Updated dependencies [366f50b]
+   - @ai-sdk/provider@3.0.0-beta.27
+   - @ai-sdk/provider-utils@4.0.0-beta.53
+
  ## 2.0.0-beta.53

  ### Patch Changes
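Note on the 9061dc0 entry: the image-editing feature corresponds to the new `/images/edits` branch added to `dist/index.js` further down. Below is a minimal sketch of exercising the extended `doGenerate` signature directly; the provider name, base URL, model ID, and the exact shape of the `files`/`mask` entries are assumptions inferred from this diff (in particular from the `fileToBlob` helper), not documented API:

```ts
import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
import { readFile } from 'node:fs/promises';

async function main() {
  // Placeholder provider settings; any OpenAI-compatible endpoint that exposes
  // an /images/edits route would be configured the same way.
  const provider = createOpenAICompatible({
    name: 'example',
    baseURL: 'https://api.example.com/v1',
    apiKey: process.env.EXAMPLE_API_KEY,
  });

  const model = provider.imageModel('example-image-model');

  // The shape of `files`/`mask` entries (raw bytes plus a mediaType, or a
  // { type: 'url', url } reference) is inferred from fileToBlob in this diff
  // and may differ from the published ImageModelV3 types.
  const result = await model.doGenerate({
    prompt: 'Replace the background with a beach at sunset',
    files: [{ data: new Uint8Array(await readFile('input.png')), mediaType: 'image/png' }],
    mask: { data: new Uint8Array(await readFile('mask.png')), mediaType: 'image/png' },
    n: 1,
    size: '1024x1024',
    aspectRatio: undefined,
    seed: undefined,
    providerOptions: {},
    headers: undefined,
    abortSignal: undefined,
  });

  // Images come back as base64 strings (b64_json from the edits response).
  console.log(result.images.length, result.warnings);
}

main().catch(console.error);
```

In application code these options would normally flow through the higher-level AI SDK image helpers rather than a direct `doGenerate` call; the direct call is shown only because its signature is what this diff changes.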
package/dist/index.d.mts CHANGED
@@ -202,7 +202,7 @@ declare class OpenAICompatibleImageModel implements ImageModelV3 {
  readonly maxImagesPerCall = 10;
  get provider(): string;
  constructor(modelId: OpenAICompatibleImageModelId, config: OpenAICompatibleImageModelConfig);
- doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV3['doGenerate']>>>;
+ doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, files, mask, }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV3['doGenerate']>>>;
  }

  interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends Omit<ProviderV3, 'imageModel'> {
@@ -211,6 +211,10 @@ interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPL
  chatModel(modelId: CHAT_MODEL_IDS): LanguageModelV3;
  completionModel(modelId: COMPLETION_MODEL_IDS): LanguageModelV3;
  embeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV3;
+ /**
+  * @deprecated Use `embeddingModel` instead.
+  */
+ textEmbeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV3;
  imageModel(modelId: IMAGE_MODEL_IDS): ImageModelV3;
  }
  interface OpenAICompatibleProviderSettings {
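The new `textEmbeddingModel` member is a compatibility alias only: at runtime it is wired to the same factory as `embeddingModel` (see the `provider.textEmbeddingModel = createEmbeddingModel;` assignment at the end of `dist/index.js`). A short sketch with placeholder settings and model IDs:

```ts
import { createOpenAICompatible } from '@ai-sdk/openai-compatible';

// Placeholder settings; the alias behaves identically for any compatible endpoint.
const provider = createOpenAICompatible({
  name: 'example',
  baseURL: 'https://api.example.com/v1',
});

// Preferred spelling going forward:
const embeddings = provider.embeddingModel('example-embedding-model');

// Still compiles and returns the same kind of model, but editors will now flag
// it because of the @deprecated JSDoc tag added in this release.
const legacy = provider.textEmbeddingModel('example-embedding-model');
```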
package/dist/index.d.ts CHANGED
@@ -202,7 +202,7 @@ declare class OpenAICompatibleImageModel implements ImageModelV3 {
  readonly maxImagesPerCall = 10;
  get provider(): string;
  constructor(modelId: OpenAICompatibleImageModelId, config: OpenAICompatibleImageModelConfig);
- doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV3['doGenerate']>>>;
+ doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, files, mask, }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV3['doGenerate']>>>;
  }

  interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends Omit<ProviderV3, 'imageModel'> {
@@ -211,6 +211,10 @@ interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPL
  chatModel(modelId: CHAT_MODEL_IDS): LanguageModelV3;
  completionModel(modelId: COMPLETION_MODEL_IDS): LanguageModelV3;
  embeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV3;
+ /**
+  * @deprecated Use `embeddingModel` instead.
+  */
+ textEmbeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV3;
  imageModel(modelId: IMAGE_MODEL_IDS): ImageModelV3;
  }
  interface OpenAICompatibleProviderSettings {
package/dist/index.js CHANGED
@@ -1360,9 +1360,11 @@ var OpenAICompatibleImageModel = class {
  seed,
  providerOptions,
  headers,
- abortSignal
+ abortSignal,
+ files,
+ mask
  }) {
- var _a, _b, _c, _d, _e;
+ var _a, _b, _c, _d, _e, _f, _g;
  const warnings = [];
  if (aspectRatio != null) {
  warnings.push({
@@ -1375,6 +1377,41 @@ var OpenAICompatibleImageModel = class {
  warnings.push({ type: "unsupported", feature: "seed" });
  }
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+ if (files != null && files.length > 0) {
+ const { value: response2, responseHeaders: responseHeaders2 } = await (0, import_provider_utils5.postFormDataToApi)({
+ url: this.config.url({
+ path: "/images/edits",
+ modelId: this.modelId
+ }),
+ headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), headers),
+ formData: (0, import_provider_utils5.convertToFormData)({
+ model: this.modelId,
+ prompt,
+ image: await Promise.all(files.map((file) => fileToBlob(file))),
+ mask: mask != null ? await fileToBlob(mask) : void 0,
+ n,
+ size,
+ ...(_d = providerOptions.openai) != null ? _d : {}
+ }),
+ failedResponseHandler: (0, import_provider_utils5.createJsonErrorResponseHandler)(
+ (_e = this.config.errorStructure) != null ? _e : defaultOpenAICompatibleErrorStructure
+ ),
+ successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
+ openaiCompatibleImageResponseSchema
+ ),
+ abortSignal,
+ fetch: this.config.fetch
+ });
+ return {
+ images: response2.data.map((item) => item.b64_json),
+ warnings,
+ response: {
+ timestamp: currentDate,
+ modelId: this.modelId,
+ headers: responseHeaders2
+ }
+ };
+ }
  const { value: response, responseHeaders } = await (0, import_provider_utils5.postJsonToApi)({
  url: this.config.url({
  path: "/images/generations",
@@ -1386,11 +1423,11 @@ var OpenAICompatibleImageModel = class {
  prompt,
  n,
  size,
- ...(_d = providerOptions.openai) != null ? _d : {},
+ ...(_f = providerOptions.openai) != null ? _f : {},
  response_format: "b64_json"
  },
  failedResponseHandler: (0, import_provider_utils5.createJsonErrorResponseHandler)(
- (_e = this.config.errorStructure) != null ? _e : defaultOpenAICompatibleErrorStructure
+ (_g = this.config.errorStructure) != null ? _g : defaultOpenAICompatibleErrorStructure
  ),
  successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
  openaiCompatibleImageResponseSchema
@@ -1412,12 +1449,19 @@ var OpenAICompatibleImageModel = class {
  var openaiCompatibleImageResponseSchema = import_v48.z.object({
  data: import_v48.z.array(import_v48.z.object({ b64_json: import_v48.z.string() }))
  });
+ async function fileToBlob(file) {
+ if (file.type === "url") {
+ return (0, import_provider_utils5.downloadBlob)(file.url);
+ }
+ const data = file.data instanceof Uint8Array ? file.data : (0, import_provider_utils5.convertBase64ToUint8Array)(file.data);
+ return new Blob([data], { type: file.mediaType });
+ }

  // src/openai-compatible-provider.ts
  var import_provider_utils6 = require("@ai-sdk/provider-utils");

  // src/version.ts
- var VERSION = true ? "2.0.0-beta.53" : "0.0.0-test";
+ var VERSION = true ? "2.0.0-beta.55" : "0.0.0-test";

  // src/openai-compatible-provider.ts
  function createOpenAICompatible(options) {
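The new `fileToBlob` helper accepts either a URL reference (fetched via `downloadBlob`) or inline data as a `Uint8Array` or base64 string. A typed sketch of the same logic; the union type is reconstructed from this compiled output rather than taken from the published `ImageModelV3` types, and global `fetch`/`atob` stand in for the `@ai-sdk/provider-utils` helpers (`downloadBlob`, `convertBase64ToUint8Array`):

```ts
// Assumed shape, reconstructed from the compiled helper above.
type ImageEditFile =
  | { type: 'url'; url: string }
  | { data: Uint8Array | string; mediaType: string };

async function fileToBlob(file: ImageEditFile): Promise<Blob> {
  if ('type' in file && file.type === 'url') {
    // URL inputs are downloaded and returned as a Blob.
    const res = await fetch(file.url);
    return res.blob();
  }
  // Inline data may be raw bytes or a base64-encoded string.
  const data =
    file.data instanceof Uint8Array
      ? file.data
      : Uint8Array.from(atob(file.data), (c) => c.charCodeAt(0));
  return new Blob([data], { type: file.mediaType });
}
```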
@@ -1460,6 +1504,7 @@ function createOpenAICompatible(options) {
  provider.chatModel = createChatModel;
  provider.completionModel = createCompletionModel;
  provider.embeddingModel = createEmbeddingModel;
+ provider.textEmbeddingModel = createEmbeddingModel;
  provider.imageModel = createImageModel;
  return provider;
  }