@blinkdotnew/dev-sdk 2.1.1 → 2.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +128 -0
- package/dist/index.d.ts +128 -0
- package/dist/index.js +168 -3
- package/dist/index.mjs +168 -3
- package/package.json +1 -1
package/dist/index.d.mts
CHANGED
@@ -481,6 +481,45 @@ interface TranscriptionRequest {
     response_format?: 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt';
     signal?: AbortSignal;
 }
+interface VideoGenerationRequest {
+    prompt: string;
+    model?: string;
+    image_url?: string;
+    duration?: string;
+    aspect_ratio?: string;
+    resolution?: string;
+    negative_prompt?: string;
+    generate_audio?: boolean;
+    seed?: number;
+    cfg_scale?: number;
+    signal?: AbortSignal;
+}
+interface VideoGenerationResponse {
+    result: {
+        video: {
+            url: string;
+            content_type?: string;
+            file_name?: string;
+            file_size?: number;
+        };
+        seed?: number;
+        video_id?: string;
+        thumbnail?: {
+            url: string;
+        };
+    };
+    metadata?: {
+        projectId: string;
+        timestamp: string;
+        provider: string;
+        model: string;
+    };
+    usage?: {
+        creditsCharged: number;
+        costUSD: number;
+        model: string;
+    };
+}
 interface TranscriptionResponse {
     text: string;
     transcript?: string;

@@ -519,6 +558,7 @@ interface BlinkAI {
         background?: "auto" | "transparent" | "opaque";
         signal?: AbortSignal;
     }): Promise<ImageGenerationResponse>;
+    generateVideo(options: VideoGenerationRequest): Promise<VideoGenerationResponse>;
     generateSpeech(options: SpeechGenerationRequest): Promise<SpeechGenerationResponse>;
     transcribeAudio(options: TranscriptionRequest): Promise<TranscriptionResponse>;
 }

@@ -984,6 +1024,18 @@ declare class HttpClient {
         response_format?: string;
         signal?: AbortSignal;
     }): Promise<BlinkResponse<any>>;
+    aiVideo(prompt: string, options?: {
+        model?: string;
+        image_url?: string;
+        duration?: string;
+        aspect_ratio?: string;
+        resolution?: string;
+        negative_prompt?: string;
+        generate_audio?: boolean;
+        seed?: number;
+        cfg_scale?: number;
+        signal?: AbortSignal;
+    }): Promise<BlinkResponse<any>>;
     /**
      * Data-specific requests
      */

@@ -2265,6 +2317,82 @@ declare class BlinkAIImpl implements BlinkAI {
         n?: number;
         signal?: AbortSignal;
     }): Promise<ImageGenerationResponse>;
+    /**
+     * Generates videos from text prompts or images using AI video generation models.
+     *
+     * @param options - Object containing:
+     * - `prompt`: Text description of the video to generate (required)
+     * - `model`: Video model to use (optional). Available models:
+     *   **Text-to-Video Models:**
+     *   - `"fal-ai/veo3.1"` - Google Veo 3.1 (best quality)
+     *   - `"fal-ai/veo3.1/fast"` (default) - Veo 3.1 fast mode (faster, cheaper)
+     *   - `"fal-ai/sora-2/text-to-video/pro"` - OpenAI Sora 2
+     *   - `"fal-ai/kling-video/v2.6/pro/text-to-video"` - Kling 2.6
+     *   **Image-to-Video Models:**
+     *   - `"fal-ai/veo3.1/image-to-video"` - Veo 3.1 I2V
+     *   - `"fal-ai/veo3.1/fast/image-to-video"` - Veo 3.1 fast I2V
+     *   - `"fal-ai/sora-2/image-to-video/pro"` - Sora 2 I2V
+     *   - `"fal-ai/kling-video/v2.6/pro/image-to-video"` - Kling 2.6 I2V
+     * - `image_url`: Source image URL for image-to-video (required for I2V models)
+     * - `duration`: Video duration ("4s", "5s", "6s", "8s", "10s", "12s")
+     * - `aspect_ratio`: Aspect ratio ("16:9", "9:16", "1:1")
+     * - `resolution`: Resolution ("720p", "1080p") - Veo/Sora only
+     * - `negative_prompt`: What to avoid in generation - Veo/Kling only
+     * - `generate_audio`: Generate audio with video (default: true)
+     * - `seed`: For reproducibility - Veo only
+     * - `cfg_scale`: Guidance scale (0-1) - Kling only
+     * - Plus optional signal parameter
+     *
+     * @example
+     * ```ts
+     * // Basic text-to-video generation (uses default fast model)
+     * const { result } = await blink.ai.generateVideo({
+     *   prompt: "A serene sunset over the ocean with gentle waves"
+     * });
+     * console.log("Video URL:", result.video.url);
+     *
+     * // High quality with Veo 3.1
+     * const { result } = await blink.ai.generateVideo({
+     *   prompt: "A cinematic shot of a futuristic city at night",
+     *   model: "fal-ai/veo3.1",
+     *   resolution: "1080p",
+     *   aspect_ratio: "16:9"
+     * });
+     *
+     * // Image-to-video animation
+     * const { result } = await blink.ai.generateVideo({
+     *   prompt: "Animate this image with gentle camera movement",
+     *   model: "fal-ai/veo3.1/fast/image-to-video",
+     *   image_url: "https://example.com/my-image.jpg",
+     *   duration: "5s"
+     * });
+     *
+     * // Using Sora 2 for creative videos
+     * const { result } = await blink.ai.generateVideo({
+     *   prompt: "A magical forest with glowing fireflies",
+     *   model: "fal-ai/sora-2/text-to-video/pro",
+     *   duration: "8s"
+     * });
+     *
+     * // Using Kling for detailed videos
+     * const { result, usage } = await blink.ai.generateVideo({
+     *   prompt: "A professional cooking tutorial scene",
+     *   model: "fal-ai/kling-video/v2.6/pro/text-to-video",
+     *   negative_prompt: "blur, distort, low quality",
+     *   cfg_scale: 0.7
+     * });
+     * console.log("Credits charged:", usage?.creditsCharged);
+     * ```
+     *
+     * @returns Promise<VideoGenerationResponse> - Object containing:
+     * - `result.video.url`: URL to the generated video
+     * - `result.video.content_type`: MIME type (video/mp4)
+     * - `result.video.file_name`: Generated filename
+     * - `result.video.file_size`: File size in bytes
+     * - `metadata`: Generation metadata (projectId, timestamp, model)
+     * - `usage`: Credits charged and cost information
+     */
+    generateVideo(options: VideoGenerationRequest): Promise<VideoGenerationResponse>;
     /**
      * Converts text to speech using AI voice synthesis models.
      *
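Taken together, the new declarations give `blink.ai.generateVideo` the same call-and-response shape as the existing image and speech helpers. A minimal usage sketch against these types follows; how the `blink` client is constructed and authenticated is outside this diff, so the parameter below is typed structurally rather than imported from the package.

```ts
// Minimal sketch against the VideoGenerationRequest/VideoGenerationResponse
// shapes added above. `blink` stands for an already-initialized SDK client
// whose `ai` member implements the BlinkAI interface from this release;
// client construction is not part of this diff.
async function renderClip(blink: {
  ai: {
    generateVideo(options: {
      prompt: string;
      model?: string;
      duration?: string;
      aspect_ratio?: string;
    }): Promise<{
      result: { video: { url: string; file_size?: number } };
      usage?: { creditsCharged: number; costUSD: number };
    }>;
  };
}): Promise<string> {
  const { result, usage } = await blink.ai.generateVideo({
    prompt: "A slow pan across a rain-soaked neon street",
    model: "fal-ai/veo3.1/fast", // documented default for this release
    duration: "5s",
    aspect_ratio: "16:9",
  });
  console.log("credits charged:", usage?.creditsCharged);
  return result.video.url; // direct URL to the rendered clip
}
```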
package/dist/index.d.ts
CHANGED
(The diff for this file is identical to package/dist/index.d.mts above: the same VideoGenerationRequest and VideoGenerationResponse interfaces, the generateVideo member on BlinkAI, the HttpClient.aiVideo declaration, and the documented generateVideo declaration on BlinkAIImpl, +128 -0.)
package/dist/index.js
CHANGED
@@ -885,6 +885,17 @@ var HttpClient = class {
       signal
     });
   }
+  async aiVideo(prompt, options = {}) {
+    const { signal, ...body } = options;
+    return this.request(`/api/ai/${this.projectId}/video`, {
+      method: "POST",
+      body: {
+        prompt,
+        ...body
+      },
+      signal
+    });
+  }
   /**
    * Data-specific requests
    */
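The transport method is thin: it splits off `signal`, spreads every remaining option into the JSON body next to `prompt`, and POSTs to `/api/ai/{projectId}/video`. A rough fetch equivalent is sketched below; the base URL and authentication headers come from the SDK's `request()` helper, which this hunk does not show, so those values are placeholders.

```ts
// Rough equivalent of the request aiVideo() assembles in the hunk above.
// Base URL and auth headers normally come from the SDK's request() helper,
// which is not visible in this diff; both values here are placeholders.
const projectId = "YOUR_PROJECT_ID";           // assumption: provided at client init
const baseUrl = "https://api.example.invalid"; // assumption: real host is set by the SDK

const res = await fetch(`${baseUrl}/api/ai/${projectId}/video`, {
  method: "POST",
  headers: { "Content-Type": "application/json" /*, Authorization: "Bearer …" */ },
  body: JSON.stringify({
    prompt: "Animate this image with gentle camera movement",
    model: "fal-ai/veo3.1/fast/image-to-video",
    image_url: "https://example.com/my-image.jpg",
    duration: "5s",
  }),
});
const payload = await res.json(); // expected to carry result.video.url (validated by generateVideo below)
```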
@@ -1274,16 +1285,45 @@ var BlinkAuth = class {
     if (!config.projectId) {
       throw new Error("projectId is required for authentication");
     }
+    const detectAuthUrl = () => {
+      if (typeof window !== "undefined") {
+        const referrer = document.referrer;
+        if (referrer?.includes("dev.blink.new")) {
+          console.log("\u{1F527} Dev environment detected via referrer, using dev.blink.new for auth");
+          return "https://dev.blink.new";
+        }
+        try {
+          if (window.parent !== window) {
+            const parentOrigin = window.parent.location.origin;
+            if (parentOrigin?.includes("dev.blink.new")) {
+              console.log("\u{1F527} Dev environment detected via parent origin, using dev.blink.new for auth");
+              return "https://dev.blink.new";
+            }
+          }
+        } catch {
+        }
+        try {
+          const opener = window.opener?.location?.origin;
+          if (opener?.includes("dev.blink.new")) {
+            console.log("\u{1F527} Dev environment detected via opener, using dev.blink.new for auth");
+            return "https://dev.blink.new";
+          }
+        } catch {
+        }
+      }
+      return "https://blink.new";
+    };
+    const defaultAuthUrl = detectAuthUrl();
     this.authConfig = {
       mode: "managed",
       // Default mode
-      authUrl:
+      authUrl: defaultAuthUrl,
       coreUrl: "https://core.blink.new",
       detectSessionInUrl: true,
       // Default to true for web compatibility
       ...config.auth
     };
-    this.authUrl = this.authConfig.authUrl ||
+    this.authUrl = this.authConfig.authUrl || defaultAuthUrl;
     this.coreUrl = this.authConfig.coreUrl || "https://core.blink.new";
     const hostname = getLocationHostname();
     if (hostname && this.authUrl === "https://blink.new" && (hostname === "localhost" || hostname === "127.0.0.1")) {

@@ -1338,7 +1378,7 @@ var BlinkAuth = class {
   setupParentWindowListener() {
     if (!isWeb || !this.isIframe || !hasWindow()) return;
     window.addEventListener("message", (event) => {
-      if (event.origin !== "https://blink.new" && event.origin !== "http://localhost:3000" && event.origin !== "http://localhost:3001") {
+      if (event.origin !== "https://blink.new" && event.origin !== "https://dev.blink.new" && event.origin !== "http://localhost:3000" && event.origin !== "http://localhost:3001") {
         return;
       }
       if (event.data?.type === "BLINK_AUTH_TOKENS") {
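Two details of the constructor change are worth noting: reading `window.parent.location.origin` or `window.opener.location.origin` throws for cross-origin embedders, which is why each probe sits in a bare try/catch, and the detected default never overrides a caller-supplied `authUrl`, because `...config.auth` is spread after it and the later assignment falls back with `||`. The sketch below illustrates that precedence; the config shape is inferred from the fields this hunk reads (`config.projectId`, `config.auth`), not from published SDK docs.

```ts
// Illustration of the precedence preserved by the hunk above: an explicit
// auth.authUrl still wins over the new dev.blink.new auto-detection.
// The config shape is an assumption based on the fields the constructor reads.
interface AuthLikeConfig {
  projectId: string;
  auth?: { authUrl?: string; coreUrl?: string };
}

function resolveAuthUrl(config: AuthLikeConfig, detectedDefault: string): string {
  // mirrors: this.authConfig = { authUrl: defaultAuthUrl, ...config.auth }
  const authConfig = { authUrl: detectedDefault, ...config.auth };
  // mirrors: this.authUrl = this.authConfig.authUrl || defaultAuthUrl
  return authConfig.authUrl || detectedDefault;
}

// resolveAuthUrl({ projectId: "p" }, "https://dev.blink.new")
//   -> "https://dev.blink.new" (auto-detected dev host)
// resolveAuthUrl({ projectId: "p", auth: { authUrl: "https://blink.new" } }, "https://dev.blink.new")
//   -> "https://blink.new" (explicit override still wins)
```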
@@ -4311,6 +4351,131 @@ var BlinkAIImpl = class {
       );
     }
   }
+  /**
+   * Generates videos from text prompts or images using AI video generation models.
+   *
+   * @param options - Object containing:
+   * - `prompt`: Text description of the video to generate (required)
+   * - `model`: Video model to use (optional). Available models:
+   *   **Text-to-Video Models:**
+   *   - `"fal-ai/veo3.1"` - Google Veo 3.1 (best quality)
+   *   - `"fal-ai/veo3.1/fast"` (default) - Veo 3.1 fast mode (faster, cheaper)
+   *   - `"fal-ai/sora-2/text-to-video/pro"` - OpenAI Sora 2
+   *   - `"fal-ai/kling-video/v2.6/pro/text-to-video"` - Kling 2.6
+   *   **Image-to-Video Models:**
+   *   - `"fal-ai/veo3.1/image-to-video"` - Veo 3.1 I2V
+   *   - `"fal-ai/veo3.1/fast/image-to-video"` - Veo 3.1 fast I2V
+   *   - `"fal-ai/sora-2/image-to-video/pro"` - Sora 2 I2V
+   *   - `"fal-ai/kling-video/v2.6/pro/image-to-video"` - Kling 2.6 I2V
+   * - `image_url`: Source image URL for image-to-video (required for I2V models)
+   * - `duration`: Video duration ("4s", "5s", "6s", "8s", "10s", "12s")
+   * - `aspect_ratio`: Aspect ratio ("16:9", "9:16", "1:1")
+   * - `resolution`: Resolution ("720p", "1080p") - Veo/Sora only
+   * - `negative_prompt`: What to avoid in generation - Veo/Kling only
+   * - `generate_audio`: Generate audio with video (default: true)
+   * - `seed`: For reproducibility - Veo only
+   * - `cfg_scale`: Guidance scale (0-1) - Kling only
+   * - Plus optional signal parameter
+   *
+   * @example
+   * ```ts
+   * // Basic text-to-video generation (uses default fast model)
+   * const { result } = await blink.ai.generateVideo({
+   *   prompt: "A serene sunset over the ocean with gentle waves"
+   * });
+   * console.log("Video URL:", result.video.url);
+   *
+   * // High quality with Veo 3.1
+   * const { result } = await blink.ai.generateVideo({
+   *   prompt: "A cinematic shot of a futuristic city at night",
+   *   model: "fal-ai/veo3.1",
+   *   resolution: "1080p",
+   *   aspect_ratio: "16:9"
+   * });
+   *
+   * // Image-to-video animation
+   * const { result } = await blink.ai.generateVideo({
+   *   prompt: "Animate this image with gentle camera movement",
+   *   model: "fal-ai/veo3.1/fast/image-to-video",
+   *   image_url: "https://example.com/my-image.jpg",
+   *   duration: "5s"
+   * });
+   *
+   * // Using Sora 2 for creative videos
+   * const { result } = await blink.ai.generateVideo({
+   *   prompt: "A magical forest with glowing fireflies",
+   *   model: "fal-ai/sora-2/text-to-video/pro",
+   *   duration: "8s"
+   * });
+   *
+   * // Using Kling for detailed videos
+   * const { result, usage } = await blink.ai.generateVideo({
+   *   prompt: "A professional cooking tutorial scene",
+   *   model: "fal-ai/kling-video/v2.6/pro/text-to-video",
+   *   negative_prompt: "blur, distort, low quality",
+   *   cfg_scale: 0.7
+   * });
+   * console.log("Credits charged:", usage?.creditsCharged);
+   * ```
+   *
+   * @returns Promise<VideoGenerationResponse> - Object containing:
+   * - `result.video.url`: URL to the generated video
+   * - `result.video.content_type`: MIME type (video/mp4)
+   * - `result.video.file_name`: Generated filename
+   * - `result.video.file_size`: File size in bytes
+   * - `metadata`: Generation metadata (projectId, timestamp, model)
+   * - `usage`: Credits charged and cost information
+   */
+  async generateVideo(options) {
+    try {
+      if (!options.prompt) {
+        throw new BlinkAIError("Prompt is required");
+      }
+      const i2vModels = [
+        "fal-ai/veo3.1/image-to-video",
+        "fal-ai/veo3.1/fast/image-to-video",
+        "fal-ai/sora-2/image-to-video/pro",
+        "fal-ai/kling-video/v2.6/pro/image-to-video"
+      ];
+      if (options.model && i2vModels.includes(options.model) && !options.image_url) {
+        throw new BlinkAIError("image_url is required for image-to-video models");
+      }
+      if (options.image_url) {
+        const validation = this.validateImageUrl(options.image_url);
+        if (!validation.isValid) {
+          throw new BlinkAIError(`Invalid image_url: ${validation.error}`);
+        }
+      }
+      const response = await this.httpClient.aiVideo(
+        options.prompt,
+        {
+          model: options.model,
+          image_url: options.image_url,
+          duration: options.duration,
+          aspect_ratio: options.aspect_ratio,
+          resolution: options.resolution,
+          negative_prompt: options.negative_prompt,
+          generate_audio: options.generate_audio,
+          seed: options.seed,
+          cfg_scale: options.cfg_scale,
+          signal: options.signal
+        }
+      );
+      if (!response.data?.result?.video?.url) {
+        throw new BlinkAIError("Invalid response format: missing video URL");
+      }
+      return response.data;
+    } catch (error) {
+      if (error instanceof BlinkAIError) {
+        throw error;
+      }
+      throw new BlinkAIError(
+        `Video generation failed: ${error instanceof Error ? error.message : "Unknown error"}`,
+        void 0,
+        { originalError: error }
+      );
+    }
+  }
   /**
    * Converts text to speech using AI voice synthesis models.
    *
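The implementation validates `prompt`, enforces `image_url` for the four image-to-video models, and forwards `options.signal` untouched to `httpClient.aiVideo`, so a long-running generation can be cancelled with a standard AbortController. A small sketch follows, assuming an already-initialized client (not shown in this diff) and an arbitrary two-minute cap.

```ts
// Sketch: cancelling a slow generation via the signal option that
// generateVideo forwards to httpClient.aiVideo in the hunk above.
// `blink` stands for an initialized SDK client (construction is outside
// this diff); the two-minute timeout is an arbitrary example value.
async function generateWithTimeout(blink: {
  ai: {
    generateVideo(o: { prompt: string; duration?: string; signal?: AbortSignal }): Promise<{
      result: { video: { url: string } };
    }>;
  };
}): Promise<string> {
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), 120_000);
  try {
    // BlinkAIError wraps both validation failures (e.g. a missing image_url
    // for an image-to-video model) and transport or abort errors.
    const { result } = await blink.ai.generateVideo({
      prompt: "A timelapse of clouds rolling over a mountain ridge",
      duration: "8s",
      signal: controller.signal,
    });
    return result.video.url;
  } finally {
    clearTimeout(timer);
  }
}
```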
package/dist/index.mjs
CHANGED
(The diff for this file is identical to package/dist/index.js above — the same HttpClient.aiVideo method, the BlinkAuth dev-environment auth URL detection, the added https://dev.blink.new message origin, and the BlinkAIImpl.generateVideo implementation — with the hunks at slightly different offsets (-883, -1272, -1336, -4309), +168 -3.)
package/package.json
CHANGED