@blinkdotnew/sdk 2.1.0 → 2.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -481,6 +481,45 @@ interface TranscriptionRequest {
     response_format?: 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt';
     signal?: AbortSignal;
 }
+interface VideoGenerationRequest {
+    prompt: string;
+    model?: string;
+    image_url?: string;
+    duration?: string;
+    aspect_ratio?: string;
+    resolution?: string;
+    negative_prompt?: string;
+    generate_audio?: boolean;
+    seed?: number;
+    cfg_scale?: number;
+    signal?: AbortSignal;
+}
+interface VideoGenerationResponse {
+    result: {
+        video: {
+            url: string;
+            content_type?: string;
+            file_name?: string;
+            file_size?: number;
+        };
+        seed?: number;
+        video_id?: string;
+        thumbnail?: {
+            url: string;
+        };
+    };
+    metadata?: {
+        projectId: string;
+        timestamp: string;
+        provider: string;
+        model: string;
+    };
+    usage?: {
+        creditsCharged: number;
+        costUSD: number;
+        model: string;
+    };
+}
 interface TranscriptionResponse {
     text: string;
     transcript?: string;
@@ -519,6 +558,7 @@ interface BlinkAI {
         background?: "auto" | "transparent" | "opaque";
         signal?: AbortSignal;
     }): Promise<ImageGenerationResponse>;
+    generateVideo(options: VideoGenerationRequest): Promise<VideoGenerationResponse>;
    generateSpeech(options: SpeechGenerationRequest): Promise<SpeechGenerationResponse>;
    transcribeAudio(options: TranscriptionRequest): Promise<TranscriptionResponse>;
 }
@@ -936,6 +976,18 @@ declare class HttpClient {
         response_format?: string;
         signal?: AbortSignal;
     }): Promise<BlinkResponse<any>>;
+    aiVideo(prompt: string, options?: {
+        model?: string;
+        image_url?: string;
+        duration?: string;
+        aspect_ratio?: string;
+        resolution?: string;
+        negative_prompt?: string;
+        generate_audio?: boolean;
+        seed?: number;
+        cfg_scale?: number;
+        signal?: AbortSignal;
+    }): Promise<BlinkResponse<any>>;
     /**
      * Data-specific requests
      */
@@ -990,9 +1042,9 @@ declare class HttpClient {
 
 /**
  * Platform detection for cross-platform compatibility
- * Detects whether code is running on web, React Native, or Node.js
+ * Detects whether code is running on web, React Native, Node.js, or Deno
  */
-type Platform = 'web' | 'react-native' | 'node';
+type Platform = 'web' | 'react-native' | 'node' | 'deno';
 /**
  * Current platform
  */
@@ -1003,7 +1055,9 @@ declare const platform: Platform;
 declare const isWeb: boolean;
 declare const isReactNative: boolean;
 declare const isNode: boolean;
+declare const isDeno: boolean;
 declare const isBrowser: boolean;
+declare const isServer: boolean;
 
 /**
  * Blink Auth Module - Client-side authentication management
@@ -2207,6 +2261,82 @@ declare class BlinkAIImpl implements BlinkAI {
         n?: number;
         signal?: AbortSignal;
     }): Promise<ImageGenerationResponse>;
+    /**
+     * Generates videos from text prompts or images using AI video generation models.
+     *
+     * @param options - Object containing:
+     *   - `prompt`: Text description of the video to generate (required)
+     *   - `model`: Video model to use (optional). Available models:
+     *     **Text-to-Video Models:**
+     *     - `"fal-ai/veo3.1"` - Google Veo 3.1 (best quality)
+     *     - `"fal-ai/veo3.1/fast"` (default) - Veo 3.1 fast mode (faster, cheaper)
+     *     - `"fal-ai/sora-2/text-to-video/pro"` - OpenAI Sora 2
+     *     - `"fal-ai/kling-video/v2.6/pro/text-to-video"` - Kling 2.6
+     *     **Image-to-Video Models:**
+     *     - `"fal-ai/veo3.1/image-to-video"` - Veo 3.1 I2V
+     *     - `"fal-ai/veo3.1/fast/image-to-video"` - Veo 3.1 fast I2V
+     *     - `"fal-ai/sora-2/image-to-video/pro"` - Sora 2 I2V
+     *     - `"fal-ai/kling-video/v2.6/pro/image-to-video"` - Kling 2.6 I2V
+     *   - `image_url`: Source image URL for image-to-video (required for I2V models)
+     *   - `duration`: Video duration ("4s", "5s", "6s", "8s", "10s", "12s")
+     *   - `aspect_ratio`: Aspect ratio ("16:9", "9:16", "1:1")
+     *   - `resolution`: Resolution ("720p", "1080p") - Veo/Sora only
+     *   - `negative_prompt`: What to avoid in generation - Veo/Kling only
+     *   - `generate_audio`: Generate audio with video (default: true)
+     *   - `seed`: For reproducibility - Veo only
+     *   - `cfg_scale`: Guidance scale (0-1) - Kling only
+     *   - Plus optional signal parameter
+     *
+     * @example
+     * ```ts
+     * // Basic text-to-video generation (uses default fast model)
+     * const { result } = await blink.ai.generateVideo({
+     *   prompt: "A serene sunset over the ocean with gentle waves"
+     * });
+     * console.log("Video URL:", result.video.url);
+     *
+     * // High quality with Veo 3.1
+     * const { result } = await blink.ai.generateVideo({
+     *   prompt: "A cinematic shot of a futuristic city at night",
+     *   model: "fal-ai/veo3.1",
+     *   resolution: "1080p",
+     *   aspect_ratio: "16:9"
+     * });
+     *
+     * // Image-to-video animation
+     * const { result } = await blink.ai.generateVideo({
+     *   prompt: "Animate this image with gentle camera movement",
+     *   model: "fal-ai/veo3.1/fast/image-to-video",
+     *   image_url: "https://example.com/my-image.jpg",
+     *   duration: "5s"
+     * });
+     *
+     * // Using Sora 2 for creative videos
+     * const { result } = await blink.ai.generateVideo({
+     *   prompt: "A magical forest with glowing fireflies",
+     *   model: "fal-ai/sora-2/text-to-video/pro",
+     *   duration: "8s"
+     * });
+     *
+     * // Using Kling for detailed videos
+     * const { result, usage } = await blink.ai.generateVideo({
+     *   prompt: "A professional cooking tutorial scene",
+     *   model: "fal-ai/kling-video/v2.6/pro/text-to-video",
+     *   negative_prompt: "blur, distort, low quality",
+     *   cfg_scale: 0.7
+     * });
+     * console.log("Credits charged:", usage?.creditsCharged);
+     * ```
+     *
+     * @returns Promise<VideoGenerationResponse> - Object containing:
+     *   - `result.video.url`: URL to the generated video
+     *   - `result.video.content_type`: MIME type (video/mp4)
+     *   - `result.video.file_name`: Generated filename
+     *   - `result.video.file_size`: File size in bytes
+     *   - `metadata`: Generation metadata (projectId, timestamp, model)
+     *   - `usage`: Credits charged and cost information
+     */
+    generateVideo(options: VideoGenerationRequest): Promise<VideoGenerationResponse>;
     /**
      * Converts text to speech using AI voice synthesis models.
      *
@@ -2382,4 +2512,4 @@ declare class BlinkRealtimeImpl implements BlinkRealtime {
     onPresence(channelName: string, callback: (users: PresenceUser[]) => void): () => void;
 }
 
-export { type AnalyticsEvent, AsyncStorageAdapter, type AuthState, type AuthStateChangeCallback, type AuthTokens, type BlinkAI, BlinkAIImpl, type BlinkAnalytics, BlinkAnalyticsImpl, type BlinkClient, type BlinkClientConfig, type BlinkData, BlinkDataImpl, BlinkDatabase, type BlinkRealtime, BlinkRealtimeChannel, BlinkRealtimeError, BlinkRealtimeImpl, type BlinkStorage, BlinkStorageImpl, BlinkTable, type BlinkUser, type CreateOptions, type DataExtraction, type FileObject, type FilterCondition, type ImageGenerationRequest, type ImageGenerationResponse, type Message, NoOpStorageAdapter, type ObjectGenerationRequest, type ObjectGenerationResponse, type PresenceUser, type QueryOptions, type RealtimeChannel, type RealtimeGetMessagesOptions, type RealtimeMessage, type RealtimePublishOptions, type RealtimeSubscribeOptions, type SearchRequest, type SearchResponse, type SpeechGenerationRequest, type SpeechGenerationResponse, type StorageAdapter, type StorageUploadOptions, type StorageUploadResponse, type TableOperations, type TextGenerationRequest, type TextGenerationResponse, type TokenUsage, type TranscriptionRequest, type TranscriptionResponse, type UpdateOptions, type UpsertOptions, type WebBrowserModule, WebStorageAdapter, createClient, getDefaultStorageAdapter, isBrowser, isNode, isReactNative, isWeb, platform };
+export { type AnalyticsEvent, AsyncStorageAdapter, type AuthState, type AuthStateChangeCallback, type AuthTokens, type BlinkAI, BlinkAIImpl, type BlinkAnalytics, BlinkAnalyticsImpl, type BlinkClient, type BlinkClientConfig, type BlinkData, BlinkDataImpl, BlinkDatabase, type BlinkRealtime, BlinkRealtimeChannel, BlinkRealtimeError, BlinkRealtimeImpl, type BlinkStorage, BlinkStorageImpl, BlinkTable, type BlinkUser, type CreateOptions, type DataExtraction, type FileObject, type FilterCondition, type ImageGenerationRequest, type ImageGenerationResponse, type Message, NoOpStorageAdapter, type ObjectGenerationRequest, type ObjectGenerationResponse, type PresenceUser, type QueryOptions, type RealtimeChannel, type RealtimeGetMessagesOptions, type RealtimeMessage, type RealtimePublishOptions, type RealtimeSubscribeOptions, type SearchRequest, type SearchResponse, type SpeechGenerationRequest, type SpeechGenerationResponse, type StorageAdapter, type StorageUploadOptions, type StorageUploadResponse, type TableOperations, type TextGenerationRequest, type TextGenerationResponse, type TokenUsage, type TranscriptionRequest, type TranscriptionResponse, type UpdateOptions, type UpsertOptions, type WebBrowserModule, WebStorageAdapter, createClient, getDefaultStorageAdapter, isBrowser, isDeno, isNode, isReactNative, isServer, isWeb, platform };
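The typings above describe the new `blink.ai.generateVideo` surface. Below is a minimal consumer-side sketch of those typings; the `createClient` config fields shown (`projectId`, `authRequired`) are assumptions not covered by this diff, while the request/response fields come straight from `VideoGenerationRequest` and `VideoGenerationResponse`.

```ts
import { createClient } from "@blinkdotnew/sdk";

// Hypothetical client setup: config fields here are assumptions, not part of this diff.
const blink = createClient({ projectId: "my-project", authRequired: false });

// Cancelable generation via the optional AbortSignal declared on VideoGenerationRequest.
const controller = new AbortController();
const timeout = setTimeout(() => controller.abort(), 5 * 60 * 1000); // give up after 5 minutes

const { result, usage } = await blink.ai.generateVideo({
  prompt: "A drone shot over snowy mountains at sunrise",
  model: "fal-ai/veo3.1/fast", // default model per the JSDoc above
  duration: "8s",
  aspect_ratio: "16:9",
  signal: controller.signal,
});
clearTimeout(timeout);

console.log(result.video.url, result.video.file_size, usage?.costUSD);
```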
package/dist/index.d.ts CHANGED
@@ -481,6 +481,45 @@ interface TranscriptionRequest {
     response_format?: 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt';
     signal?: AbortSignal;
 }
+interface VideoGenerationRequest {
+    prompt: string;
+    model?: string;
+    image_url?: string;
+    duration?: string;
+    aspect_ratio?: string;
+    resolution?: string;
+    negative_prompt?: string;
+    generate_audio?: boolean;
+    seed?: number;
+    cfg_scale?: number;
+    signal?: AbortSignal;
+}
+interface VideoGenerationResponse {
+    result: {
+        video: {
+            url: string;
+            content_type?: string;
+            file_name?: string;
+            file_size?: number;
+        };
+        seed?: number;
+        video_id?: string;
+        thumbnail?: {
+            url: string;
+        };
+    };
+    metadata?: {
+        projectId: string;
+        timestamp: string;
+        provider: string;
+        model: string;
+    };
+    usage?: {
+        creditsCharged: number;
+        costUSD: number;
+        model: string;
+    };
+}
 interface TranscriptionResponse {
     text: string;
     transcript?: string;
@@ -519,6 +558,7 @@ interface BlinkAI {
         background?: "auto" | "transparent" | "opaque";
         signal?: AbortSignal;
     }): Promise<ImageGenerationResponse>;
+    generateVideo(options: VideoGenerationRequest): Promise<VideoGenerationResponse>;
    generateSpeech(options: SpeechGenerationRequest): Promise<SpeechGenerationResponse>;
    transcribeAudio(options: TranscriptionRequest): Promise<TranscriptionResponse>;
 }
@@ -936,6 +976,18 @@ declare class HttpClient {
         response_format?: string;
         signal?: AbortSignal;
     }): Promise<BlinkResponse<any>>;
+    aiVideo(prompt: string, options?: {
+        model?: string;
+        image_url?: string;
+        duration?: string;
+        aspect_ratio?: string;
+        resolution?: string;
+        negative_prompt?: string;
+        generate_audio?: boolean;
+        seed?: number;
+        cfg_scale?: number;
+        signal?: AbortSignal;
+    }): Promise<BlinkResponse<any>>;
     /**
      * Data-specific requests
      */
@@ -990,9 +1042,9 @@ declare class HttpClient {
 
 /**
  * Platform detection for cross-platform compatibility
- * Detects whether code is running on web, React Native, or Node.js
+ * Detects whether code is running on web, React Native, Node.js, or Deno
  */
-type Platform = 'web' | 'react-native' | 'node';
+type Platform = 'web' | 'react-native' | 'node' | 'deno';
 /**
  * Current platform
  */
@@ -1003,7 +1055,9 @@ declare const platform: Platform;
 declare const isWeb: boolean;
 declare const isReactNative: boolean;
 declare const isNode: boolean;
+declare const isDeno: boolean;
 declare const isBrowser: boolean;
+declare const isServer: boolean;
 
 /**
  * Blink Auth Module - Client-side authentication management
@@ -2207,6 +2261,82 @@ declare class BlinkAIImpl implements BlinkAI {
         n?: number;
         signal?: AbortSignal;
     }): Promise<ImageGenerationResponse>;
+    /**
+     * Generates videos from text prompts or images using AI video generation models.
+     *
+     * @param options - Object containing:
+     *   - `prompt`: Text description of the video to generate (required)
+     *   - `model`: Video model to use (optional). Available models:
+     *     **Text-to-Video Models:**
+     *     - `"fal-ai/veo3.1"` - Google Veo 3.1 (best quality)
+     *     - `"fal-ai/veo3.1/fast"` (default) - Veo 3.1 fast mode (faster, cheaper)
+     *     - `"fal-ai/sora-2/text-to-video/pro"` - OpenAI Sora 2
+     *     - `"fal-ai/kling-video/v2.6/pro/text-to-video"` - Kling 2.6
+     *     **Image-to-Video Models:**
+     *     - `"fal-ai/veo3.1/image-to-video"` - Veo 3.1 I2V
+     *     - `"fal-ai/veo3.1/fast/image-to-video"` - Veo 3.1 fast I2V
+     *     - `"fal-ai/sora-2/image-to-video/pro"` - Sora 2 I2V
+     *     - `"fal-ai/kling-video/v2.6/pro/image-to-video"` - Kling 2.6 I2V
+     *   - `image_url`: Source image URL for image-to-video (required for I2V models)
+     *   - `duration`: Video duration ("4s", "5s", "6s", "8s", "10s", "12s")
+     *   - `aspect_ratio`: Aspect ratio ("16:9", "9:16", "1:1")
+     *   - `resolution`: Resolution ("720p", "1080p") - Veo/Sora only
+     *   - `negative_prompt`: What to avoid in generation - Veo/Kling only
+     *   - `generate_audio`: Generate audio with video (default: true)
+     *   - `seed`: For reproducibility - Veo only
+     *   - `cfg_scale`: Guidance scale (0-1) - Kling only
+     *   - Plus optional signal parameter
+     *
+     * @example
+     * ```ts
+     * // Basic text-to-video generation (uses default fast model)
+     * const { result } = await blink.ai.generateVideo({
+     *   prompt: "A serene sunset over the ocean with gentle waves"
+     * });
+     * console.log("Video URL:", result.video.url);
+     *
+     * // High quality with Veo 3.1
+     * const { result } = await blink.ai.generateVideo({
+     *   prompt: "A cinematic shot of a futuristic city at night",
+     *   model: "fal-ai/veo3.1",
+     *   resolution: "1080p",
+     *   aspect_ratio: "16:9"
+     * });
+     *
+     * // Image-to-video animation
+     * const { result } = await blink.ai.generateVideo({
+     *   prompt: "Animate this image with gentle camera movement",
+     *   model: "fal-ai/veo3.1/fast/image-to-video",
+     *   image_url: "https://example.com/my-image.jpg",
+     *   duration: "5s"
+     * });
+     *
+     * // Using Sora 2 for creative videos
+     * const { result } = await blink.ai.generateVideo({
+     *   prompt: "A magical forest with glowing fireflies",
+     *   model: "fal-ai/sora-2/text-to-video/pro",
+     *   duration: "8s"
+     * });
+     *
+     * // Using Kling for detailed videos
+     * const { result, usage } = await blink.ai.generateVideo({
+     *   prompt: "A professional cooking tutorial scene",
+     *   model: "fal-ai/kling-video/v2.6/pro/text-to-video",
+     *   negative_prompt: "blur, distort, low quality",
+     *   cfg_scale: 0.7
+     * });
+     * console.log("Credits charged:", usage?.creditsCharged);
+     * ```
+     *
+     * @returns Promise<VideoGenerationResponse> - Object containing:
+     *   - `result.video.url`: URL to the generated video
+     *   - `result.video.content_type`: MIME type (video/mp4)
+     *   - `result.video.file_name`: Generated filename
+     *   - `result.video.file_size`: File size in bytes
+     *   - `metadata`: Generation metadata (projectId, timestamp, model)
+     *   - `usage`: Credits charged and cost information
+     */
+    generateVideo(options: VideoGenerationRequest): Promise<VideoGenerationResponse>;
     /**
      * Converts text to speech using AI voice synthesis models.
      *
@@ -2382,4 +2512,4 @@ declare class BlinkRealtimeImpl implements BlinkRealtime {
     onPresence(channelName: string, callback: (users: PresenceUser[]) => void): () => void;
 }
 
-export { type AnalyticsEvent, AsyncStorageAdapter, type AuthState, type AuthStateChangeCallback, type AuthTokens, type BlinkAI, BlinkAIImpl, type BlinkAnalytics, BlinkAnalyticsImpl, type BlinkClient, type BlinkClientConfig, type BlinkData, BlinkDataImpl, BlinkDatabase, type BlinkRealtime, BlinkRealtimeChannel, BlinkRealtimeError, BlinkRealtimeImpl, type BlinkStorage, BlinkStorageImpl, BlinkTable, type BlinkUser, type CreateOptions, type DataExtraction, type FileObject, type FilterCondition, type ImageGenerationRequest, type ImageGenerationResponse, type Message, NoOpStorageAdapter, type ObjectGenerationRequest, type ObjectGenerationResponse, type PresenceUser, type QueryOptions, type RealtimeChannel, type RealtimeGetMessagesOptions, type RealtimeMessage, type RealtimePublishOptions, type RealtimeSubscribeOptions, type SearchRequest, type SearchResponse, type SpeechGenerationRequest, type SpeechGenerationResponse, type StorageAdapter, type StorageUploadOptions, type StorageUploadResponse, type TableOperations, type TextGenerationRequest, type TextGenerationResponse, type TokenUsage, type TranscriptionRequest, type TranscriptionResponse, type UpdateOptions, type UpsertOptions, type WebBrowserModule, WebStorageAdapter, createClient, getDefaultStorageAdapter, isBrowser, isNode, isReactNative, isWeb, platform };
+export { type AnalyticsEvent, AsyncStorageAdapter, type AuthState, type AuthStateChangeCallback, type AuthTokens, type BlinkAI, BlinkAIImpl, type BlinkAnalytics, BlinkAnalyticsImpl, type BlinkClient, type BlinkClientConfig, type BlinkData, BlinkDataImpl, BlinkDatabase, type BlinkRealtime, BlinkRealtimeChannel, BlinkRealtimeError, BlinkRealtimeImpl, type BlinkStorage, BlinkStorageImpl, BlinkTable, type BlinkUser, type CreateOptions, type DataExtraction, type FileObject, type FilterCondition, type ImageGenerationRequest, type ImageGenerationResponse, type Message, NoOpStorageAdapter, type ObjectGenerationRequest, type ObjectGenerationResponse, type PresenceUser, type QueryOptions, type RealtimeChannel, type RealtimeGetMessagesOptions, type RealtimeMessage, type RealtimePublishOptions, type RealtimeSubscribeOptions, type SearchRequest, type SearchResponse, type SpeechGenerationRequest, type SpeechGenerationResponse, type StorageAdapter, type StorageUploadOptions, type StorageUploadResponse, type TableOperations, type TextGenerationRequest, type TextGenerationResponse, type TokenUsage, type TranscriptionRequest, type TranscriptionResponse, type UpdateOptions, type UpsertOptions, type WebBrowserModule, WebStorageAdapter, createClient, getDefaultStorageAdapter, isBrowser, isDeno, isNode, isReactNative, isServer, isWeb, platform };
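The typings in both declaration files also add `isDeno` and `isServer` and extend the `Platform` union with `'deno'`. Based solely on those exported declarations, a usage sketch for branching on the new runtime flags:

```ts
import { isBrowser, isDeno, isNode, isServer, platform } from "@blinkdotnew/sdk";

// 'deno' is now a possible value of `platform`; isServer covers Node.js and Deno.
if (isServer) {
  // Safe place for server-side concerns (env vars, secrets, filesystem).
  console.log(`Running on ${platform}${isDeno ? " (Deno)" : isNode ? " (Node.js)" : ""}`);
} else if (isBrowser) {
  // Web or React Native.
  console.log("Running in a client environment:", platform);
}
```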
package/dist/index.js CHANGED
@@ -7,6 +7,33 @@ var __require = /* @__PURE__ */ ((x) => typeof require !== "undefined" ? require
   throw Error('Dynamic require of "' + x + '" is not supported');
 });
 
+// ../core/src/platform.ts
+function detectPlatform() {
+  if (typeof Deno !== "undefined") {
+    return "deno";
+  }
+  if (typeof process !== "undefined" && process.versions?.node) {
+    if (typeof navigator !== "undefined" && navigator.product === "ReactNative") {
+      return "react-native";
+    }
+    return "node";
+  }
+  if (typeof navigator !== "undefined" && navigator.product === "ReactNative") {
+    return "react-native";
+  }
+  if (typeof window !== "undefined" && typeof document !== "undefined") {
+    return "web";
+  }
+  return "node";
+}
+var platform = detectPlatform();
+var isWeb = platform === "web";
+var isReactNative = platform === "react-native";
+var isNode = platform === "node";
+var isDeno = platform === "deno";
+var isBrowser = isWeb || isReactNative;
+var isServer = isNode || isDeno;
+
 // ../core/src/storage-adapter.ts
 var WebStorageAdapter = class {
   getItem(key) {
@@ -92,6 +119,9 @@ var NoOpStorageAdapter = class {
   }
 };
 function getDefaultStorageAdapter() {
+  if (isDeno) {
+    return new NoOpStorageAdapter();
+  }
   if (typeof window !== "undefined" && typeof localStorage !== "undefined") {
     try {
       localStorage.setItem("__test__", "test");
@@ -103,28 +133,6 @@ function getDefaultStorageAdapter() {
   return new NoOpStorageAdapter();
 }
 
-// ../core/src/platform.ts
-function detectPlatform() {
-  if (typeof process !== "undefined" && process.versions?.node) {
-    if (typeof navigator !== "undefined" && navigator.product === "ReactNative") {
-      return "react-native";
-    }
-    return "node";
-  }
-  if (typeof navigator !== "undefined" && navigator.product === "ReactNative") {
-    return "react-native";
-  }
-  if (typeof window !== "undefined" && typeof document !== "undefined") {
-    return "web";
-  }
-  return "node";
-}
-var platform = detectPlatform();
-var isWeb = platform === "web";
-var isReactNative = platform === "react-native";
-var isNode = platform === "node";
-var isBrowser = isWeb || isReactNative;
-
 // ../core/src/types.ts
 var BlinkError = class extends Error {
   constructor(message, code, status, details) {
@@ -877,6 +885,17 @@ var HttpClient = class {
       signal
     });
   }
+  async aiVideo(prompt, options = {}) {
+    const { signal, ...body } = options;
+    return this.request(`/api/ai/${this.projectId}/video`, {
+      method: "POST",
+      body: {
+        prompt,
+        ...body
+      },
+      signal
+    });
+  }
   /**
    * Data-specific requests
    */
@@ -4271,6 +4290,131 @@ var BlinkAIImpl = class {
       );
     }
   }
+  /**
+   * Generates videos from text prompts or images using AI video generation models.
+   *
+   * @param options - Object containing:
+   *   - `prompt`: Text description of the video to generate (required)
+   *   - `model`: Video model to use (optional). Available models:
+   *     **Text-to-Video Models:**
+   *     - `"fal-ai/veo3.1"` - Google Veo 3.1 (best quality)
+   *     - `"fal-ai/veo3.1/fast"` (default) - Veo 3.1 fast mode (faster, cheaper)
+   *     - `"fal-ai/sora-2/text-to-video/pro"` - OpenAI Sora 2
+   *     - `"fal-ai/kling-video/v2.6/pro/text-to-video"` - Kling 2.6
+   *     **Image-to-Video Models:**
+   *     - `"fal-ai/veo3.1/image-to-video"` - Veo 3.1 I2V
+   *     - `"fal-ai/veo3.1/fast/image-to-video"` - Veo 3.1 fast I2V
+   *     - `"fal-ai/sora-2/image-to-video/pro"` - Sora 2 I2V
+   *     - `"fal-ai/kling-video/v2.6/pro/image-to-video"` - Kling 2.6 I2V
+   *   - `image_url`: Source image URL for image-to-video (required for I2V models)
+   *   - `duration`: Video duration ("4s", "5s", "6s", "8s", "10s", "12s")
+   *   - `aspect_ratio`: Aspect ratio ("16:9", "9:16", "1:1")
+   *   - `resolution`: Resolution ("720p", "1080p") - Veo/Sora only
+   *   - `negative_prompt`: What to avoid in generation - Veo/Kling only
+   *   - `generate_audio`: Generate audio with video (default: true)
+   *   - `seed`: For reproducibility - Veo only
+   *   - `cfg_scale`: Guidance scale (0-1) - Kling only
+   *   - Plus optional signal parameter
+   *
+   * @example
+   * ```ts
+   * // Basic text-to-video generation (uses default fast model)
+   * const { result } = await blink.ai.generateVideo({
+   *   prompt: "A serene sunset over the ocean with gentle waves"
+   * });
+   * console.log("Video URL:", result.video.url);
+   *
+   * // High quality with Veo 3.1
+   * const { result } = await blink.ai.generateVideo({
+   *   prompt: "A cinematic shot of a futuristic city at night",
+   *   model: "fal-ai/veo3.1",
+   *   resolution: "1080p",
+   *   aspect_ratio: "16:9"
+   * });
+   *
+   * // Image-to-video animation
+   * const { result } = await blink.ai.generateVideo({
+   *   prompt: "Animate this image with gentle camera movement",
+   *   model: "fal-ai/veo3.1/fast/image-to-video",
+   *   image_url: "https://example.com/my-image.jpg",
+   *   duration: "5s"
+   * });
+   *
+   * // Using Sora 2 for creative videos
+   * const { result } = await blink.ai.generateVideo({
+   *   prompt: "A magical forest with glowing fireflies",
+   *   model: "fal-ai/sora-2/text-to-video/pro",
+   *   duration: "8s"
+   * });
+   *
+   * // Using Kling for detailed videos
+   * const { result, usage } = await blink.ai.generateVideo({
+   *   prompt: "A professional cooking tutorial scene",
+   *   model: "fal-ai/kling-video/v2.6/pro/text-to-video",
+   *   negative_prompt: "blur, distort, low quality",
+   *   cfg_scale: 0.7
+   * });
+   * console.log("Credits charged:", usage?.creditsCharged);
+   * ```
+   *
+   * @returns Promise<VideoGenerationResponse> - Object containing:
+   *   - `result.video.url`: URL to the generated video
+   *   - `result.video.content_type`: MIME type (video/mp4)
+   *   - `result.video.file_name`: Generated filename
+   *   - `result.video.file_size`: File size in bytes
+   *   - `metadata`: Generation metadata (projectId, timestamp, model)
+   *   - `usage`: Credits charged and cost information
+   */
+  async generateVideo(options) {
+    try {
+      if (!options.prompt) {
+        throw new BlinkAIError("Prompt is required");
+      }
+      const i2vModels = [
+        "fal-ai/veo3.1/image-to-video",
+        "fal-ai/veo3.1/fast/image-to-video",
+        "fal-ai/sora-2/image-to-video/pro",
+        "fal-ai/kling-video/v2.6/pro/image-to-video"
+      ];
+      if (options.model && i2vModels.includes(options.model) && !options.image_url) {
+        throw new BlinkAIError("image_url is required for image-to-video models");
+      }
+      if (options.image_url) {
+        const validation = this.validateImageUrl(options.image_url);
+        if (!validation.isValid) {
+          throw new BlinkAIError(`Invalid image_url: ${validation.error}`);
+        }
+      }
+      const response = await this.httpClient.aiVideo(
        options.prompt,
+        {
+          model: options.model,
+          image_url: options.image_url,
+          duration: options.duration,
+          aspect_ratio: options.aspect_ratio,
+          resolution: options.resolution,
+          negative_prompt: options.negative_prompt,
+          generate_audio: options.generate_audio,
+          seed: options.seed,
+          cfg_scale: options.cfg_scale,
+          signal: options.signal
+        }
+      );
+      if (!response.data?.result?.video?.url) {
+        throw new BlinkAIError("Invalid response format: missing video URL");
+      }
+      return response.data;
+    } catch (error) {
+      if (error instanceof BlinkAIError) {
+        throw error;
+      }
+      throw new BlinkAIError(
+        `Video generation failed: ${error instanceof Error ? error.message : "Unknown error"}`,
+        void 0,
+        { originalError: error }
+      );
+    }
+  }
   /**
    * Converts text to speech using AI voice synthesis models.
    *
@@ -5624,8 +5768,10 @@ exports.WebStorageAdapter = WebStorageAdapter;
 exports.createClient = createClient;
 exports.getDefaultStorageAdapter = getDefaultStorageAdapter;
 exports.isBrowser = isBrowser;
+exports.isDeno = isDeno;
 exports.isNode = isNode;
 exports.isReactNative = isReactNative;
+exports.isServer = isServer;
 exports.isWeb = isWeb;
 exports.platform = platform;
 //# sourceMappingURL=index.js.map
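The runtime above validates image-to-video requests client-side before posting to `/api/ai/{projectId}/video`. A small sketch of that behavior from the caller's perspective, reusing the hypothetical `blink` client from the earlier example (the thrown error class isn't exported, so it is caught as a plain Error here):

```ts
// Picking an I2V model without image_url is rejected before any network request,
// per the i2vModels check added in generateVideo above.
try {
  await blink.ai.generateVideo({
    prompt: "Animate this photo",
    model: "fal-ai/veo3.1/fast/image-to-video",
    // image_url intentionally omitted
  });
} catch (err) {
  // Expected message: "image_url is required for image-to-video models"
  console.error((err as Error).message);
}
```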
package/dist/index.mjs CHANGED
@@ -5,6 +5,33 @@ var __require = /* @__PURE__ */ ((x) => typeof require !== "undefined" ? require
   throw Error('Dynamic require of "' + x + '" is not supported');
 });
 
+// ../core/src/platform.ts
+function detectPlatform() {
+  if (typeof Deno !== "undefined") {
+    return "deno";
+  }
+  if (typeof process !== "undefined" && process.versions?.node) {
+    if (typeof navigator !== "undefined" && navigator.product === "ReactNative") {
+      return "react-native";
+    }
+    return "node";
+  }
+  if (typeof navigator !== "undefined" && navigator.product === "ReactNative") {
+    return "react-native";
+  }
+  if (typeof window !== "undefined" && typeof document !== "undefined") {
+    return "web";
+  }
+  return "node";
+}
+var platform = detectPlatform();
+var isWeb = platform === "web";
+var isReactNative = platform === "react-native";
+var isNode = platform === "node";
+var isDeno = platform === "deno";
+var isBrowser = isWeb || isReactNative;
+var isServer = isNode || isDeno;
+
 // ../core/src/storage-adapter.ts
 var WebStorageAdapter = class {
   getItem(key) {
@@ -90,6 +117,9 @@ var NoOpStorageAdapter = class {
   }
 };
 function getDefaultStorageAdapter() {
+  if (isDeno) {
+    return new NoOpStorageAdapter();
+  }
   if (typeof window !== "undefined" && typeof localStorage !== "undefined") {
     try {
       localStorage.setItem("__test__", "test");
@@ -101,28 +131,6 @@ function getDefaultStorageAdapter() {
   return new NoOpStorageAdapter();
 }
 
-// ../core/src/platform.ts
-function detectPlatform() {
-  if (typeof process !== "undefined" && process.versions?.node) {
-    if (typeof navigator !== "undefined" && navigator.product === "ReactNative") {
-      return "react-native";
-    }
-    return "node";
-  }
-  if (typeof navigator !== "undefined" && navigator.product === "ReactNative") {
-    return "react-native";
-  }
-  if (typeof window !== "undefined" && typeof document !== "undefined") {
-    return "web";
-  }
-  return "node";
-}
-var platform = detectPlatform();
-var isWeb = platform === "web";
-var isReactNative = platform === "react-native";
-var isNode = platform === "node";
-var isBrowser = isWeb || isReactNative;
-
 // ../core/src/types.ts
 var BlinkError = class extends Error {
   constructor(message, code, status, details) {
@@ -875,6 +883,17 @@ var HttpClient = class {
      signal
    });
  }
+  async aiVideo(prompt, options = {}) {
+    const { signal, ...body } = options;
+    return this.request(`/api/ai/${this.projectId}/video`, {
+      method: "POST",
+      body: {
+        prompt,
+        ...body
+      },
+      signal
+    });
+  }
   /**
    * Data-specific requests
    */
@@ -4269,6 +4288,131 @@ var BlinkAIImpl = class {
       );
     }
   }
+  /**
+   * Generates videos from text prompts or images using AI video generation models.
+   *
+   * @param options - Object containing:
+   *   - `prompt`: Text description of the video to generate (required)
+   *   - `model`: Video model to use (optional). Available models:
+   *     **Text-to-Video Models:**
+   *     - `"fal-ai/veo3.1"` - Google Veo 3.1 (best quality)
+   *     - `"fal-ai/veo3.1/fast"` (default) - Veo 3.1 fast mode (faster, cheaper)
+   *     - `"fal-ai/sora-2/text-to-video/pro"` - OpenAI Sora 2
+   *     - `"fal-ai/kling-video/v2.6/pro/text-to-video"` - Kling 2.6
+   *     **Image-to-Video Models:**
+   *     - `"fal-ai/veo3.1/image-to-video"` - Veo 3.1 I2V
+   *     - `"fal-ai/veo3.1/fast/image-to-video"` - Veo 3.1 fast I2V
+   *     - `"fal-ai/sora-2/image-to-video/pro"` - Sora 2 I2V
+   *     - `"fal-ai/kling-video/v2.6/pro/image-to-video"` - Kling 2.6 I2V
+   *   - `image_url`: Source image URL for image-to-video (required for I2V models)
+   *   - `duration`: Video duration ("4s", "5s", "6s", "8s", "10s", "12s")
+   *   - `aspect_ratio`: Aspect ratio ("16:9", "9:16", "1:1")
+   *   - `resolution`: Resolution ("720p", "1080p") - Veo/Sora only
+   *   - `negative_prompt`: What to avoid in generation - Veo/Kling only
+   *   - `generate_audio`: Generate audio with video (default: true)
+   *   - `seed`: For reproducibility - Veo only
+   *   - `cfg_scale`: Guidance scale (0-1) - Kling only
+   *   - Plus optional signal parameter
+   *
+   * @example
+   * ```ts
+   * // Basic text-to-video generation (uses default fast model)
+   * const { result } = await blink.ai.generateVideo({
+   *   prompt: "A serene sunset over the ocean with gentle waves"
+   * });
+   * console.log("Video URL:", result.video.url);
+   *
+   * // High quality with Veo 3.1
+   * const { result } = await blink.ai.generateVideo({
+   *   prompt: "A cinematic shot of a futuristic city at night",
+   *   model: "fal-ai/veo3.1",
+   *   resolution: "1080p",
+   *   aspect_ratio: "16:9"
+   * });
+   *
+   * // Image-to-video animation
+   * const { result } = await blink.ai.generateVideo({
+   *   prompt: "Animate this image with gentle camera movement",
+   *   model: "fal-ai/veo3.1/fast/image-to-video",
+   *   image_url: "https://example.com/my-image.jpg",
+   *   duration: "5s"
+   * });
+   *
+   * // Using Sora 2 for creative videos
+   * const { result } = await blink.ai.generateVideo({
+   *   prompt: "A magical forest with glowing fireflies",
+   *   model: "fal-ai/sora-2/text-to-video/pro",
+   *   duration: "8s"
+   * });
+   *
+   * // Using Kling for detailed videos
+   * const { result, usage } = await blink.ai.generateVideo({
+   *   prompt: "A professional cooking tutorial scene",
+   *   model: "fal-ai/kling-video/v2.6/pro/text-to-video",
+   *   negative_prompt: "blur, distort, low quality",
+   *   cfg_scale: 0.7
+   * });
+   * console.log("Credits charged:", usage?.creditsCharged);
+   * ```
+   *
+   * @returns Promise<VideoGenerationResponse> - Object containing:
+   *   - `result.video.url`: URL to the generated video
+   *   - `result.video.content_type`: MIME type (video/mp4)
+   *   - `result.video.file_name`: Generated filename
+   *   - `result.video.file_size`: File size in bytes
+   *   - `metadata`: Generation metadata (projectId, timestamp, model)
+   *   - `usage`: Credits charged and cost information
+   */
+  async generateVideo(options) {
+    try {
+      if (!options.prompt) {
+        throw new BlinkAIError("Prompt is required");
+      }
+      const i2vModels = [
+        "fal-ai/veo3.1/image-to-video",
+        "fal-ai/veo3.1/fast/image-to-video",
+        "fal-ai/sora-2/image-to-video/pro",
+        "fal-ai/kling-video/v2.6/pro/image-to-video"
+      ];
+      if (options.model && i2vModels.includes(options.model) && !options.image_url) {
+        throw new BlinkAIError("image_url is required for image-to-video models");
+      }
+      if (options.image_url) {
+        const validation = this.validateImageUrl(options.image_url);
+        if (!validation.isValid) {
+          throw new BlinkAIError(`Invalid image_url: ${validation.error}`);
+        }
+      }
+      const response = await this.httpClient.aiVideo(
        options.prompt,
+        {
+          model: options.model,
+          image_url: options.image_url,
+          duration: options.duration,
+          aspect_ratio: options.aspect_ratio,
+          resolution: options.resolution,
+          negative_prompt: options.negative_prompt,
+          generate_audio: options.generate_audio,
+          seed: options.seed,
+          cfg_scale: options.cfg_scale,
+          signal: options.signal
+        }
+      );
+      if (!response.data?.result?.video?.url) {
+        throw new BlinkAIError("Invalid response format: missing video URL");
+      }
+      return response.data;
+    } catch (error) {
+      if (error instanceof BlinkAIError) {
+        throw error;
+      }
+      throw new BlinkAIError(
+        `Video generation failed: ${error instanceof Error ? error.message : "Unknown error"}`,
+        void 0,
+        { originalError: error }
+      );
+    }
+  }
   /**
    * Converts text to speech using AI voice synthesis models.
    *
@@ -5608,6 +5752,6 @@ function createClient(config) {
   return new BlinkClientImpl(config);
 }
 
-export { AsyncStorageAdapter, BlinkAIImpl, BlinkAnalyticsImpl, BlinkDataImpl, BlinkDatabase, BlinkRealtimeChannel, BlinkRealtimeImpl, BlinkStorageImpl, BlinkTable, NoOpStorageAdapter, WebStorageAdapter, createClient, getDefaultStorageAdapter, isBrowser, isNode, isReactNative, isWeb, platform };
+export { AsyncStorageAdapter, BlinkAIImpl, BlinkAnalyticsImpl, BlinkDataImpl, BlinkDatabase, BlinkRealtimeChannel, BlinkRealtimeImpl, BlinkStorageImpl, BlinkTable, NoOpStorageAdapter, WebStorageAdapter, createClient, getDefaultStorageAdapter, isBrowser, isDeno, isNode, isReactNative, isServer, isWeb, platform };
 //# sourceMappingURL=index.mjs.map
 //# sourceMappingURL=index.mjs.map
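Both bundles also make `getDefaultStorageAdapter` Deno-aware: on Deno it now returns the no-op adapter instead of probing `localStorage`. A sketch of that observable behavior, using only exports present in the new export list:

```ts
import { getDefaultStorageAdapter, isDeno, NoOpStorageAdapter } from "@blinkdotnew/sdk";

const adapter = getDefaultStorageAdapter();
if (isDeno) {
  // Per the getDefaultStorageAdapter change above, Deno always gets the no-op adapter.
  console.log(adapter instanceof NoOpStorageAdapter); // true
}
```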
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@blinkdotnew/sdk",
-  "version": "2.1.0",
+  "version": "2.2.0",
   "description": "Blink TypeScript SDK for client-side applications - Zero-boilerplate CRUD + auth + AI + analytics + notifications for modern SaaS/AI apps",
   "keywords": [
     "blink",