vargai 0.4.0-alpha112 → 0.4.0-alpha113
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- package/package.json +1 -1
- package/src/ai-sdk/file.ts +2 -2
- package/src/ai-sdk/generate-element.ts +3 -3
- package/src/ai-sdk/generate-music.ts +2 -2
- package/src/ai-sdk/generate-video.ts +2 -2
- package/src/ai-sdk/providers/editly/backends/local.ts +4 -4
- package/src/ai-sdk/providers/editly/index.ts +12 -6
- package/src/ai-sdk/providers/editly/layers.ts +14 -7
- package/src/ai-sdk/providers/editly/rendi/index.ts +3 -3
- package/src/ai-sdk/providers/elevenlabs.ts +7 -4
- package/src/ai-sdk/providers/fal.ts +11 -8
- package/src/ai-sdk/providers/google.ts +17 -6
- package/src/ai-sdk/providers/heygen.ts +3 -3
- package/src/ai-sdk/providers/higgsfield.ts +15 -9
- package/src/ai-sdk/providers/openai.ts +8 -6
- package/src/ai-sdk/providers/together.ts +4 -2
- package/src/ai-sdk/providers/varg.ts +1 -1
- package/src/react/types.ts +1 -1

package/package.json
CHANGED

@@ -107,7 +107,7 @@
   "license": "Apache-2.0",
   "author": "varg.ai <hello@varg.ai> (https://varg.ai)",
   "sideEffects": false,
-  "version": "0.4.0-alpha112",
+  "version": "0.4.0-alpha113",
   "exports": {
     ".": "./src/index.ts",
     "./ai": "./src/ai-sdk/index.ts",

package/src/ai-sdk/file.ts
CHANGED

@@ -59,7 +59,7 @@ export class File {
   }
 
   static fromUrl(url: string, mediaType?: string): File {
-    return new File({ url, mediaType });
+    return new File(mediaType != null ? { url, mediaType } : { url });
   }
 
   /** Hydrate a File from the render service response shape */
@@ -86,7 +86,7 @@ export class File {
     return new File({
       data: generated.uint8Array,
      mediaType: generated.mediaType,
-      url: generated.url,
+      ...(generated.url != null && { url: generated.url }),
     });
   }
 

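This change is representative of the whole release: possibly-undefined values are no longer written as explicit object properties. A minimal sketch of why, assuming the package compiles with exactOptionalPropertyTypes: true (the tsconfig itself is not part of this diff):

// Hypothetical illustration, not code from the package.
interface Props {
  url: string;
  mediaType?: string; // the key may be absent, but must not hold undefined
}

function build(url: string, mediaType?: string): Props {
  // return { url, mediaType };
  // ^ rejected under exactOptionalPropertyTypes when mediaType is undefined
  return mediaType != null ? { url, mediaType } : { url }; // omit the key instead
}
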
package/src/ai-sdk/generate-element.ts
CHANGED

@@ -77,9 +77,9 @@ export async function generateElement(
     model,
     prompt: images ? { text, images } : text,
     n,
-    size,
-    aspectRatio,
-    seed,
+    ...(size != null && { size }),
+    ...(aspectRatio != null && { aspectRatio }),
+    ...(seed != null && { seed }),
   });
 
   return {

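Two spellings of the conditional spread appear in this release: ...(x != null && { x }) here, and ...(x != null ? { x } : {}) elsewhere. They behave identically, because spreading a boolean into an object literal contributes nothing. A small sketch with hypothetical names:

function buildArgs(n: number, size?: string, seed?: number) {
  return {
    n,
    ...(size != null && { size }), // spreads false when absent: a no-op
    ...(seed != null ? { seed } : {}), // equivalent ternary spelling
  };
}

console.log(buildArgs(1, undefined, 42)); // { n: 1, seed: 42 }; no "size" key
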
package/src/ai-sdk/generate-video.ts
CHANGED

@@ -151,8 +151,8 @@ export async function generateVideo(
    seed,
    files,
    providerOptions,
-    abortSignal,
-    headers,
+    ...(abortSignal != null && { abortSignal }),
+    ...(headers != null && { headers }),
  });
 
  const videos = result.videos.map((v) => new DefaultGeneratedVideo(v));

package/src/ai-sdk/providers/editly/backends/local.ts
CHANGED

@@ -35,10 +35,10 @@ export class LocalBackend implements FFmpegBackend {
 
     return {
       duration,
-      width: videoStream?.width,
-      height: videoStream?.height,
-      fps,
-      framerateStr,
+      ...(videoStream?.width != null ? { width: videoStream.width } : {}),
+      ...(videoStream?.height != null ? { height: videoStream.height } : {}),
+      ...(fps != null ? { fps } : {}),
+      ...(framerateStr != null ? { framerateStr } : {}),
     };
   }
 

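The probe result now omits width, height, fps, and framerateStr instead of carrying them as undefined, which implies the probed video stream may be missing entirely (for example, audio-only input). A sketch of the result shape this hunk suggests; the interface name and exact fields are assumptions, not the package's declared types:

interface ProbeResult {
  duration: number;
  width?: number; // absent when no video stream was found
  height?: number;
  fps?: number;
  framerateStr?: string;
}
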
package/src/ai-sdk/providers/editly/index.ts
CHANGED

@@ -64,7 +64,11 @@ async function getFirstVideoInfo(
   for (const layer of clip.layers) {
     if (layer.type === "video") {
       const info = await backend.ffprobe((layer as VideoLayer).path);
-      return { width: info.width, height: info.height, fps: info.fps };
+      return {
+        ...(info.width != null ? { width: info.width } : {}),
+        ...(info.height != null ? { height: info.height } : {}),
+        ...(info.fps != null ? { fps: info.fps } : {}),
+      };
     }
   }
 }
@@ -280,7 +284,9 @@ function buildBaseClipFilter(
       videoSources.push({
         inputIndex: inputIdx,
         cutFrom: videoLayer.cutFrom ?? 0,
-        mixVolume: videoLayer.mixVolume,
+        ...(videoLayer.mixVolume != null
+          ? { mixVolume: videoLayer.mixVolume }
+          : {}),
       });
     }
     inputIdx++;
@@ -793,10 +799,10 @@ export async function editly(config: EditlyConfig): Promise<EditlyResult> {
         startTime: currentClipTime,
         duration: clip.duration,
         cutFrom,
-        mixVolume,
-        fadeInDuration,
+        ...(mixVolume != null ? { mixVolume } : {}),
+        ...(fadeInDuration > 0 ? { fadeInDuration } : {}),
         fadeInCurve,
-        fadeOutDuration,
+        ...(fadeOutDuration > 0 ? { fadeOutDuration } : {}),
         fadeOutCurve,
       });
     }
@@ -1067,7 +1073,7 @@ export async function editly(config: EditlyConfig): Promise<EditlyResult> {
     filterComplex,
     outputArgs,
     outputPath: outPath,
-    verbose,
+    ...(verbose != null ? { verbose } : {}),
   });
 
   if (result.output.type === "file" && verbose) {

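Note that fadeInDuration and fadeOutDuration are gated on > 0 rather than != null, so a zero-length fade is now dropped from the options object as well, a slightly stronger condition than the nullability checks used elsewhere in this release. A sketch with hypothetical values:

const fadeInDuration = 0;
const mixVolume: number | undefined = undefined;

const clip = {
  ...(mixVolume != null ? { mixVolume } : {}), // omitted only when null/undefined
  ...(fadeInDuration > 0 ? { fadeInDuration } : {}), // also omitted when 0
};

console.log(clip); // {}
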
package/src/ai-sdk/providers/editly/layers.ts
CHANGED

@@ -129,14 +129,15 @@ export function getVideoFilter(
     filters.push("setsar=1");
     filters.push("fps=30");
     filters.push("settb=1/30");
+    const overlayDuration = layer.cutTo
+      ? layer.cutTo - (layer.cutFrom ?? 0)
+      : undefined;
     return {
       inputs: [
         {
           label: inputLabel,
           path: layer.path,
-          duration: layer.cutTo
-            ? layer.cutTo - (layer.cutFrom ?? 0)
-            : undefined,
+          ...(overlayDuration != null ? { duration: overlayDuration } : {}),
         },
       ],
       filterComplex: `[${inputLabel}]${filters.join(",")}[${outputLabel}]`,
@@ -154,14 +155,17 @@ export function getVideoFilter(
       `[${fgLabel}]scale=${width}:${height}:force_original_aspect_ratio=decrease,setsar=1[${fgLabel}fg]`,
       `[${blurLabel}bg][${fgLabel}fg]overlay=(W-w)/2:(H-h)/2,fps=30,settb=1/30[${outputLabel}]`,
     ].join(";");
+    const containBlurDuration = layer.cutTo
+      ? layer.cutTo - (layer.cutFrom ?? 0)
+      : undefined;
     return {
       inputs: [
         {
           label: inputLabel,
           path: layer.path,
-          duration: layer.cutTo
-            ? layer.cutTo - (layer.cutFrom ?? 0)
-            : undefined,
+          ...(containBlurDuration != null
+            ? { duration: containBlurDuration }
+            : {}),
         },
       ],
       filterComplex,
@@ -186,12 +190,15 @@ export function getVideoFilter(
   filters.push("fps=30");
   filters.push("settb=1/30");
 
+  const fullDuration = layer.cutTo
+    ? layer.cutTo - (layer.cutFrom ?? 0)
+    : undefined;
   return {
     inputs: [
       {
         label: inputLabel,
         path: layer.path,
-        duration: layer.cutTo ? layer.cutTo - (layer.cutFrom ?? 0) : undefined,
+        ...(fullDuration != null ? { duration: fullDuration } : {}),
       },
     ],
     filterComplex: `[${inputLabel}]${filters.join(",")}[${outputLabel}]`,

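Hoisting the ternary into a named const is what lets TypeScript narrow the value for the conditional spread: a const binding narrows after a != null test, while an expression recomputed inline is typed fresh at each occurrence. A reduced sketch (names are illustrative, not the package's):

function inputFor(path: string, cutFrom?: number, cutTo?: number) {
  const duration = cutTo ? cutTo - (cutFrom ?? 0) : undefined;
  return {
    path,
    ...(duration != null ? { duration } : {}), // duration narrows to number here
  };
}
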
package/src/ai-sdk/providers/editly/rendi/index.ts
CHANGED

@@ -104,9 +104,9 @@ export class RendiBackend implements FFmpegBackend {
     }
     return {
       duration: output.duration ?? 0,
-      width: output.width,
-      height: output.height,
-      fps: output.frame_rate,
+      ...(output.width != null ? { width: output.width } : {}),
+      ...(output.height != null ? { height: output.height } : {}),
+      ...(output.frame_rate != null ? { fps: output.frame_rate } : {}),
     };
   }
 

package/src/ai-sdk/providers/elevenlabs.ts
CHANGED

@@ -89,7 +89,9 @@ class ElevenLabsMusicModel implements MusicModelV3 {
     const elevenLabsOptions = providerOptions?.elevenlabs ?? {};
     const audio = await this.client.music.compose({
       prompt,
-      musicLengthMs: duration ? Math.round(duration * 1000) : undefined,
+      ...(duration != null
+        ? { musicLengthMs: Math.round(duration * 1000) }
+        : {}),
       modelId: this.modelId,
       ...elevenLabsOptions,
     } as Parameters<typeof this.client.music.compose>[0]);
@@ -230,9 +232,8 @@ class ElevenLabsSpeechModel implements SpeechModelV3 {
       response: {
         timestamp: new Date(),
         modelId: this.modelId,
-        headers: undefined,
       },
-      providerMetadata,
+      ...(providerMetadata != null ? { providerMetadata } : {}),
     };
   }
 }
@@ -318,7 +319,9 @@ export async function generateMusic(
 
   const audio = await client.music.compose({
     prompt,
-    musicLengthMs: durationSeconds ? durationSeconds * 1000 : undefined,
+    ...(durationSeconds != null
+      ? { musicLengthMs: durationSeconds * 1000 }
+      : {}),
    modelId: "music_v1",
  });
 

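Both compose calls translate a duration in seconds into the millisecond field sent to ElevenLabs, and the model-class path rounds because a fractional-second duration could otherwise yield a non-integer millisecond count. A quick illustration:

const seconds = 0.07;
const raw = seconds * 1000; // may be 70.00000000000001 from float rounding
const ms = Math.round(seconds * 1000); // 70, a whole number of milliseconds
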
package/src/ai-sdk/providers/fal.ts
CHANGED

@@ -90,7 +90,7 @@ function createFalCache(name: string): CacheStorage {
 
 const pendingStorage = createFalCache("fal-pending");
 
-const DEFAULT_TIMEOUT_MS =
+const DEFAULT_TIMEOUT_MS = 30 * 60 * 1000;
 const FAL_TIMEOUT_MS = (() => {
   if (!process.env.FAL_TIMEOUT_MS) return DEFAULT_TIMEOUT_MS;
   const parsed = Number.parseInt(process.env.FAL_TIMEOUT_MS, 10);
@@ -438,7 +438,7 @@ async function executeWithQueueRecovery<T>(
       requestId: pending.request_id,
       logs,
       timeout: FAL_TIMEOUT_MS,
-      onQueueUpdate,
+      ...(onQueueUpdate != null ? { onQueueUpdate } : {}),
     });
     const result = await fal.queue.result(pending.endpoint, {
       requestId: pending.request_id,
@@ -490,7 +490,7 @@ async function executeWithQueueRecovery<T>(
     requestId: request_id,
     logs,
     timeout: FAL_TIMEOUT_MS,
-    onQueueUpdate,
+    ...(onQueueUpdate != null ? { onQueueUpdate } : {}),
   });
 
   const result = await fal.queue.result(endpoint, {
@@ -872,7 +872,7 @@ class FalVideoModel implements VideoModelV3 {
     const result = await executeWithQueueRecovery<{ data: unknown }>(
       endpoint,
       input,
-      { logs: true, stableKey },
+      { logs: true, ...(stableKey != null ? { stableKey } : {}) },
     );
 
     const data = result.data as { video?: { url?: string } };
@@ -882,7 +882,9 @@ class FalVideoModel implements VideoModelV3 {
       throw new Error("No video URL in fal response");
     }
 
-    const videoResponse = await fetch(videoUrl, { signal: abortSignal });
+    const videoResponse = await fetch(videoUrl, {
+      ...(abortSignal != null ? { signal: abortSignal } : {}),
+    });
     const videoBuffer = await videoResponse.arrayBuffer();
 
     return {
@@ -1101,7 +1103,7 @@ class FalImageModel implements ImageModelV3 {
     const result = await executeWithQueueRecovery<{ data: unknown }>(
       finalEndpoint,
       input,
-      { logs: true, stableKey },
+      { logs: true, ...(stableKey != null ? { stableKey } : {}) },
     );
 
     const data = result.data as {
@@ -1118,7 +1120,9 @@ class FalImageModel implements ImageModelV3 {
     const imageBuffers = await Promise.all(
       images.map(async (img) => {
         if (!img.url) throw new Error("Image URL is missing");
-        const response = await fetch(img.url, { signal: abortSignal });
+        const response = await fetch(img.url, {
+          ...(abortSignal != null ? { signal: abortSignal } : {}),
+        });
         return new Uint8Array(await response.arrayBuffer());
       }),
     );
@@ -1200,7 +1204,6 @@ class FalTranscriptionModel implements TranscriptionModelV3 {
       response: {
         timestamp: new Date(),
         modelId: this.modelId,
-        headers: undefined,
       },
     };
   }

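The timeout default is now written out as 30 minutes in milliseconds, and the surrounding context shows it is overridable through the FAL_TIMEOUT_MS environment variable. A sketch of that pattern; the NaN fallback at the end is an assumption, since the diff context stops before it:

const DEFAULT_TIMEOUT_MS = 30 * 60 * 1000; // 30 minutes

const FAL_TIMEOUT_MS = (() => {
  if (!process.env.FAL_TIMEOUT_MS) return DEFAULT_TIMEOUT_MS;
  const parsed = Number.parseInt(process.env.FAL_TIMEOUT_MS, 10);
  return Number.isNaN(parsed) ? DEFAULT_TIMEOUT_MS : parsed; // assumed fallback
})();
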
package/src/ai-sdk/providers/google.ts
CHANGED

@@ -209,7 +209,9 @@ class GoogleVideoModel implements VideoModelV3 {
       options.polling?.pollIntervalMs ?? DEFAULT_POLL_INTERVAL_MS;
     this.maxPollDurationMs =
       options.polling?.maxPollDurationMs ?? DEFAULT_MAX_POLL_DURATION_MS;
-    this.onProgress = options.polling?.onProgress;
+    if (options.polling?.onProgress != null) {
+      this.onProgress = options.polling.onProgress;
+    }
   }
 
   async doGenerate(options: VideoModelV3CallOptions) {
@@ -269,7 +271,9 @@ class GoogleVideoModel implements VideoModelV3 {
           mimeType: imageFile.mediaType ?? "image/png",
         };
       } else {
-        const response = await fetch(imageFile.url, { signal: abortSignal });
+        const response = await fetch(imageFile.url, {
+          ...(abortSignal != null ? { signal: abortSignal } : {}),
+        });
         if (!response.ok) {
           throw new Error(
             `Failed to fetch image from ${imageFile.url}: ${response.status} ${response.statusText}`,
@@ -286,7 +290,7 @@ class GoogleVideoModel implements VideoModelV3 {
     let operation = await this.client.models.generateVideos({
       model,
       prompt,
-      image,
+      ...(image != null ? { image } : {}),
       config,
     });
 
@@ -336,7 +340,9 @@ class GoogleVideoModel implements VideoModelV3 {
 
     const videoUrl = `${videoUri}&key=${this.apiKey}`;
 
-    const response = await fetch(videoUrl, { signal: abortSignal });
+    const response = await fetch(videoUrl, {
+      ...(abortSignal != null ? { signal: abortSignal } : {}),
+    });
     if (!response.ok) {
       throw new Error(`Failed to download video: ${response.statusText}`);
     }
@@ -400,10 +406,15 @@ export function createGoogle(
   return {
     specificationVersion: "v3",
     imageModel(modelId: string): GoogleImageModel {
-      return new GoogleImageModel(modelId, { apiKey });
+      return new GoogleImageModel(modelId, {
+        ...(apiKey != null ? { apiKey } : {}),
+      });
     },
     videoModel(modelId: string): GoogleVideoModel {
-      return new GoogleVideoModel(modelId, { apiKey, polling });
+      return new GoogleVideoModel(modelId, {
+        ...(apiKey != null ? { apiKey } : {}),
+        ...(polling != null ? { polling } : {}),
+      });
     },
     languageModel(modelId: string): LanguageModelV3 {
       throw new NoSuchModelError({

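For a class field the same hygiene needs an if statement rather than a spread: assigning undefined to an optional property is rejected under exactOptionalPropertyTypes, while skipping the assignment leaves the field genuinely absent. A reduced sketch; the field declaration is an assumption:

class Poller {
  onProgress?: (progress: number) => void; // assumed declaration

  constructor(onProgress?: (progress: number) => void) {
    // this.onProgress = onProgress; // error when onProgress is undefined
    if (onProgress != null) {
      this.onProgress = onProgress; // assign only when a callback was given
    }
  }
}
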
package/src/ai-sdk/providers/heygen.ts
CHANGED

@@ -161,7 +161,7 @@ async function pollVideoStatus(
         "X-Api-Key": apiKey,
         Accept: "application/json",
       },
-      signal,
+      ...(signal != null ? { signal } : {}),
     },
   );
 
@@ -331,7 +331,7 @@ class HeyGenVideoModel implements VideoModelV3 {
         Accept: "application/json",
       },
       body: submitBody,
-      signal: abortSignal,
+      ...(abortSignal != null ? { signal: abortSignal } : {}),
     });
 
     if (!submitRes.ok) {
@@ -353,7 +353,7 @@ class HeyGenVideoModel implements VideoModelV3 {
 
     // ---- Download video ----
     const videoRes = await fetch(statusData.video_url!, {
-      signal: abortSignal,
+      ...(abortSignal != null ? { signal: abortSignal } : {}),
     });
     if (!videoRes.ok) {
       throw new Error(`Failed to download HeyGen video (${videoRes.status})`);

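RequestInit.signal is declared as AbortSignal | null in the DOM types, so under exactOptionalPropertyTypes a possibly-undefined signal cannot be passed directly; that is why every fetch in this release grew the same conditional spread. A minimal sketch with a hypothetical helper:

async function download(url: string, abortSignal?: AbortSignal) {
  const res = await fetch(url, {
    ...(abortSignal != null ? { signal: abortSignal } : {}), // omit, not undefined
  });
  if (!res.ok) throw new Error(`Download failed (${res.status})`);
  return new Uint8Array(await res.arrayBuffer());
}
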
package/src/ai-sdk/providers/higgsfield.ts
CHANGED

@@ -163,9 +163,11 @@ class HiggsfieldImageModel implements ImageModelV3 {
     this.apiSecret = options.apiSecret ?? process.env.HIGGSFIELD_SECRET ?? "";
     this.baseURL = options.baseURL ?? "https://platform.higgsfield.ai";
     this.modelSettings = {
-      styleId: options.styleId,
-      quality: options.quality,
-      enhancePrompt: options.enhancePrompt,
+      ...(options.styleId != null ? { styleId: options.styleId } : {}),
+      ...(options.quality != null ? { quality: options.quality } : {}),
+      ...(options.enhancePrompt != null
+        ? { enhancePrompt: options.enhancePrompt }
+        : {}),
     };
   }
 
@@ -225,7 +227,7 @@ class HiggsfieldImageModel implements ImageModelV3 {
         Accept: "application/json",
       },
       body: JSON.stringify(requestBody),
-      signal: abortSignal,
+      ...(abortSignal != null ? { signal: abortSignal } : {}),
     });
 
     if (!response.ok) {
@@ -246,7 +248,9 @@ class HiggsfieldImageModel implements ImageModelV3 {
     const imageUrl = await this.pollForResult(jobId, abortSignal);
 
     // Download image
-    const imageResponse = await fetch(imageUrl, { signal: abortSignal });
+    const imageResponse = await fetch(imageUrl, {
+      ...(abortSignal != null ? { signal: abortSignal } : {}),
+    });
     const imageBuffer = new Uint8Array(await imageResponse.arrayBuffer());
 
     return {
@@ -275,7 +279,7 @@ class HiggsfieldImageModel implements ImageModelV3 {
       "hf-secret": this.apiSecret,
       Accept: "application/json",
     },
-      signal: abortSignal,
+      ...(abortSignal != null ? { signal: abortSignal } : {}),
    });
 
    if (!response.ok) {
@@ -365,9 +369,11 @@ export function createHiggsfield(
   modelSettings?: HiggsfieldImageModelSettings,
 ): ImageModelV3 {
   return new HiggsfieldImageModel(modelId, {
-    apiKey: settings.apiKey,
-    apiSecret: settings.apiSecret,
-    baseURL: settings.baseURL,
+    ...(settings.apiKey != null ? { apiKey: settings.apiKey } : {}),
+    ...(settings.apiSecret != null
+      ? { apiSecret: settings.apiSecret }
+      : {}),
+    ...(settings.baseURL != null ? { baseURL: settings.baseURL } : {}),
     ...settings.defaultModelSettings,
     ...modelSettings,
   });

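In createHiggsfield the conditional credential spreads sit ahead of ...settings.defaultModelSettings and ...modelSettings, and later spreads override earlier keys, so per-model settings still take precedence. A small demonstration of that ordering:

const defaultModelSettings = { quality: "high", styleId: "a" };
const modelSettings = { quality: "basic" };

const merged = { ...defaultModelSettings, ...modelSettings }; // later spread wins
console.log(merged); // { quality: "basic", styleId: "a" }
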
package/src/ai-sdk/providers/openai.ts
CHANGED

@@ -93,7 +93,9 @@ class OpenAIVideoModel implements VideoModelV3 {
           : imageFile.data;
       blob = new Blob([data], { type: imageFile.mediaType });
     } else {
-      const response = await fetch(imageFile.url, { signal: abortSignal });
+      const response = await fetch(imageFile.url, {
+        ...(abortSignal != null ? { signal: abortSignal } : {}),
+      });
       blob = await response.blob();
     }
     formData.append("input_reference", blob, "input.png");
@@ -147,7 +149,7 @@ class OpenAIVideoModel implements VideoModelV3 {
       Authorization: `Bearer ${this.apiKey}`,
     },
     body: formData,
-      signal: abortSignal,
+      ...(abortSignal != null ? { signal: abortSignal } : {}),
    });
 
    if (!createResponse.ok) {
@@ -172,7 +174,7 @@ class OpenAIVideoModel implements VideoModelV3 {
     headers: {
       Authorization: `Bearer ${this.apiKey}`,
     },
-      signal: abortSignal,
+      ...(abortSignal != null ? { signal: abortSignal } : {}),
    });
 
    if (!statusResponse.ok) {
@@ -199,7 +201,7 @@ class OpenAIVideoModel implements VideoModelV3 {
       headers: {
         Authorization: `Bearer ${this.apiKey}`,
       },
-      signal: abortSignal,
+      ...(abortSignal != null ? { signal: abortSignal } : {}),
     },
   );
 
@@ -241,8 +243,8 @@ export function createOpenAI(
   // add videoModel method
   provider.videoModel = (modelId: VideoModelId): VideoModelV3 =>
     new OpenAIVideoModel(modelId, {
-      apiKey: settings.apiKey,
-      baseURL: settings.baseURL,
+      ...(settings.apiKey != null ? { apiKey: settings.apiKey } : {}),
+      ...(settings.baseURL != null ? { baseURL: settings.baseURL } : {}),
     });
 
   return provider;

package/src/ai-sdk/providers/together.ts
CHANGED

@@ -106,7 +106,7 @@ class TogetherImageModel implements ImageModelV3 {
         "Content-Type": "application/json",
       },
       body: JSON.stringify(body),
-      signal: abortSignal,
+      ...(abortSignal != null ? { signal: abortSignal } : {}),
     });
 
     if (!response.ok) {
@@ -138,7 +138,9 @@ class TogetherImageModel implements ImageModelV3 {
       }
       if (img.url) {
         // URL response - download
-        const imgResponse = await fetch(img.url, { signal: abortSignal });
+        const imgResponse = await fetch(img.url, {
+          ...(abortSignal != null ? { signal: abortSignal } : {}),
+        });
         return new Uint8Array(await imgResponse.arrayBuffer());
       }
       throw new Error("Image has neither url nor b64_json");

package/src/react/types.ts
CHANGED

@@ -4,10 +4,10 @@ import type {
   SpeechModelV3,
   TranscriptionModelV3,
 } from "@ai-sdk/provider";
-import type { FFmpegBackend } from "@/ai-sdk/providers/editly/backends";
 import type { CacheStorage } from "../ai-sdk/cache";
 import type { File } from "../ai-sdk/file";
 import type { MusicModelV3 } from "../ai-sdk/music-model";
+import type { FFmpegBackend } from "../ai-sdk/providers/editly/backends";
 import type {
   CropPosition,
   Position,