@weirdfingers/baseboards 0.5.2 → 0.6.0

This diff shows the changes between publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only.
Files changed (76)
  1. package/README.md +4 -1
  2. package/dist/index.js +131 -11
  3. package/dist/index.js.map +1 -1
  4. package/package.json +1 -1
  5. package/templates/api/alembic/env.py +9 -1
  6. package/templates/api/alembic/versions/20250101_000000_initial_schema.py +107 -49
  7. package/templates/api/alembic/versions/20251022_174729_remove_provider_name_from_generations.py +7 -3
  8. package/templates/api/alembic/versions/20251023_165852_switch_to_declarative_base_and_mapping.py +57 -1
  9. package/templates/api/alembic/versions/20251202_000000_add_artifact_lineage.py +134 -0
  10. package/templates/api/alembic/versions/2025925_62735_add_seed_data_for_default_tenant.py +8 -5
  11. package/templates/api/config/generators.yaml +111 -0
  12. package/templates/api/src/boards/__init__.py +1 -1
  13. package/templates/api/src/boards/api/app.py +2 -1
  14. package/templates/api/src/boards/api/endpoints/tenant_registration.py +1 -1
  15. package/templates/api/src/boards/api/endpoints/uploads.py +150 -0
  16. package/templates/api/src/boards/auth/factory.py +1 -1
  17. package/templates/api/src/boards/dbmodels/__init__.py +8 -22
  18. package/templates/api/src/boards/generators/artifact_resolution.py +45 -12
  19. package/templates/api/src/boards/generators/implementations/fal/audio/__init__.py +16 -1
  20. package/templates/api/src/boards/generators/implementations/fal/audio/beatoven_music_generation.py +171 -0
  21. package/templates/api/src/boards/generators/implementations/fal/audio/beatoven_sound_effect_generation.py +167 -0
  22. package/templates/api/src/boards/generators/implementations/fal/audio/elevenlabs_sound_effects_v2.py +194 -0
  23. package/templates/api/src/boards/generators/implementations/fal/audio/elevenlabs_tts_eleven_v3.py +209 -0
  24. package/templates/api/src/boards/generators/implementations/fal/audio/fal_elevenlabs_tts_turbo_v2_5.py +206 -0
  25. package/templates/api/src/boards/generators/implementations/fal/audio/fal_minimax_speech_26_hd.py +237 -0
  26. package/templates/api/src/boards/generators/implementations/fal/audio/minimax_speech_2_6_turbo.py +1 -1
  27. package/templates/api/src/boards/generators/implementations/fal/image/__init__.py +30 -0
  28. package/templates/api/src/boards/generators/implementations/fal/image/clarity_upscaler.py +220 -0
  29. package/templates/api/src/boards/generators/implementations/fal/image/crystal_upscaler.py +173 -0
  30. package/templates/api/src/boards/generators/implementations/fal/image/fal_ideogram_character.py +227 -0
  31. package/templates/api/src/boards/generators/implementations/fal/image/flux_2.py +203 -0
  32. package/templates/api/src/boards/generators/implementations/fal/image/flux_2_edit.py +230 -0
  33. package/templates/api/src/boards/generators/implementations/fal/image/flux_2_pro.py +204 -0
  34. package/templates/api/src/boards/generators/implementations/fal/image/flux_2_pro_edit.py +221 -0
  35. package/templates/api/src/boards/generators/implementations/fal/image/gemini_25_flash_image.py +177 -0
  36. package/templates/api/src/boards/generators/implementations/fal/image/gpt_image_1_edit_image.py +182 -0
  37. package/templates/api/src/boards/generators/implementations/fal/image/gpt_image_1_mini.py +167 -0
  38. package/templates/api/src/boards/generators/implementations/fal/image/ideogram_character_edit.py +299 -0
  39. package/templates/api/src/boards/generators/implementations/fal/image/ideogram_v2.py +190 -0
  40. package/templates/api/src/boards/generators/implementations/fal/image/nano_banana_pro_edit.py +226 -0
  41. package/templates/api/src/boards/generators/implementations/fal/image/qwen_image.py +249 -0
  42. package/templates/api/src/boards/generators/implementations/fal/image/qwen_image_edit.py +244 -0
  43. package/templates/api/src/boards/generators/implementations/fal/video/__init__.py +42 -0
  44. package/templates/api/src/boards/generators/implementations/fal/video/bytedance_seedance_v1_pro_text_to_video.py +209 -0
  45. package/templates/api/src/boards/generators/implementations/fal/video/creatify_lipsync.py +161 -0
  46. package/templates/api/src/boards/generators/implementations/fal/video/fal_bytedance_seedance_v1_pro_image_to_video.py +222 -0
  47. package/templates/api/src/boards/generators/implementations/fal/video/fal_minimax_hailuo_02_standard_text_to_video.py +152 -0
  48. package/templates/api/src/boards/generators/implementations/fal/video/fal_pixverse_lipsync.py +197 -0
  49. package/templates/api/src/boards/generators/implementations/fal/video/fal_sora_2_text_to_video.py +173 -0
  50. package/templates/api/src/boards/generators/implementations/fal/video/infinitalk.py +221 -0
  51. package/templates/api/src/boards/generators/implementations/fal/video/kling_video_v2_5_turbo_pro_image_to_video.py +175 -0
  52. package/templates/api/src/boards/generators/implementations/fal/video/minimax_hailuo_2_3_pro_image_to_video.py +153 -0
  53. package/templates/api/src/boards/generators/implementations/fal/video/sora2_image_to_video.py +172 -0
  54. package/templates/api/src/boards/generators/implementations/fal/video/sora_2_image_to_video_pro.py +175 -0
  55. package/templates/api/src/boards/generators/implementations/fal/video/sora_2_text_to_video_pro.py +163 -0
  56. package/templates/api/src/boards/generators/implementations/fal/video/sync_lipsync_v2_pro.py +155 -0
  57. package/templates/api/src/boards/generators/implementations/fal/video/veed_lipsync.py +174 -0
  58. package/templates/api/src/boards/generators/implementations/fal/video/veo3.py +194 -0
  59. package/templates/api/src/boards/generators/implementations/fal/video/veo31_first_last_frame_to_video.py +1 -1
  60. package/templates/api/src/boards/generators/implementations/fal/video/wan_pro_image_to_video.py +158 -0
  61. package/templates/api/src/boards/graphql/access_control.py +1 -1
  62. package/templates/api/src/boards/graphql/mutations/root.py +16 -4
  63. package/templates/api/src/boards/graphql/resolvers/board.py +0 -2
  64. package/templates/api/src/boards/graphql/resolvers/generation.py +10 -233
  65. package/templates/api/src/boards/graphql/resolvers/lineage.py +381 -0
  66. package/templates/api/src/boards/graphql/resolvers/upload.py +463 -0
  67. package/templates/api/src/boards/graphql/types/generation.py +62 -26
  68. package/templates/api/src/boards/middleware.py +1 -1
  69. package/templates/api/src/boards/storage/factory.py +2 -2
  70. package/templates/api/src/boards/tenant_isolation.py +9 -9
  71. package/templates/api/src/boards/workers/actors.py +10 -1
  72. package/templates/web/package.json +1 -1
  73. package/templates/web/src/app/boards/[boardId]/page.tsx +14 -5
  74. package/templates/web/src/app/lineage/[generationId]/page.tsx +233 -0
  75. package/templates/web/src/components/boards/ArtifactPreview.tsx +20 -1
  76. package/templates/web/src/components/boards/UploadArtifact.tsx +253 -0
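Three of the new generator modules are reproduced in full below: the Sora 2 text-to-video, infinitalk, and Kling v2.5 Turbo Pro image-to-video generators. Like the rest of the fal generators added in this release, they share one execution skeleton: check FAL_KEY, lazily import fal_client, submit the job via fal_client.submit_async, sample progress events, fetch the result, and store the artifact. For orientation, a minimal sketch of that shared shape follows. It reuses the interfaces visible in the diffs below, but the endpoint ID and the SketchInput model are hypothetical placeholders, not part of the package.

    # Sketch only. Assumes the BaseGenerator / GeneratorExecutionContext /
    # GeneratorResult interfaces exactly as they appear in the template diffs
    # below; "fal-ai/some-model" and SketchInput are hypothetical.
    import os

    from pydantic import BaseModel, Field

    from boards.generators.base import (
        BaseGenerator,
        GeneratorExecutionContext,
        GeneratorResult,
    )


    class SketchInput(BaseModel):
        prompt: str = Field(description="Text prompt", min_length=1)


    class SketchGenerator(BaseGenerator):
        name = "fal-sketch"
        artifact_type = "video"

        def get_input_schema(self) -> type[SketchInput]:
            return SketchInput

        async def generate(
            self, inputs: SketchInput, context: GeneratorExecutionContext
        ) -> GeneratorResult:
            if not os.getenv("FAL_KEY"):
                raise ValueError("Missing FAL_KEY environment variable")
            import fal_client  # imported lazily, as in the template modules

            handler = await fal_client.submit_async(
                "fal-ai/some-model", arguments={"prompt": inputs.prompt}
            )
            await context.set_external_job_id(handler.request_id)
            async for _event in handler.iter_events(with_logs=True):
                pass  # the real modules sample every 3rd event into a ProgressUpdate
            result = await handler.get()
            video = result["video"]
            artifact = await context.store_video_result(
                storage_url=video["url"],
                format="mp4",
                width=1280,
                height=720,
                duration=4.0,
                output_index=0,
            )
            return GeneratorResult(outputs=[artifact])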
package/templates/api/src/boards/generators/implementations/fal/video/fal_sora_2_text_to_video.py
@@ -0,0 +1,173 @@
+"""
+Sora 2 text-to-video generator.
+
+Text-to-video endpoint for Sora 2, OpenAI's state-of-the-art video model capable of
+creating richly detailed, dynamic clips with audio from natural language prompts.
+
+Based on Fal AI's fal-ai/sora-2/text-to-video model.
+See: https://fal.ai/models/fal-ai/sora-2/text-to-video
+"""
+
+import os
+from typing import Literal
+
+from pydantic import BaseModel, Field
+
+from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+
+
+class Sora2TextToVideoInput(BaseModel):
+    """Input schema for Sora 2 text-to-video generation.
+
+    Artifact fields are automatically detected via type introspection
+    and resolved from generation IDs to artifact objects.
+    """
+
+    prompt: str = Field(
+        description="Text description of desired video",
+        min_length=1,
+        max_length=5000,
+    )
+    resolution: Literal["720p"] = Field(
+        default="720p",
+        description="Video output quality (currently only 720p is supported)",
+    )
+    aspect_ratio: Literal["9:16", "16:9"] = Field(
+        default="16:9",
+        description="Video dimensions",
+    )
+    duration: Literal[4, 8, 12] = Field(
+        default=4,
+        description="Video length in seconds",
+    )
+
+
+class FalSora2TextToVideoGenerator(BaseGenerator):
+    """Generator for text-to-video using Sora 2."""
+
+    name = "fal-sora-2-text-to-video"
+    description = (
+        "Fal: Sora 2 - OpenAI's state-of-the-art text-to-video with richly detailed, dynamic clips"
+    )
+    artifact_type = "video"
+
+    def get_input_schema(self) -> type[Sora2TextToVideoInput]:
+        """Return the input schema for this generator."""
+        return Sora2TextToVideoInput
+
+    async def generate(
+        self, inputs: Sora2TextToVideoInput, context: GeneratorExecutionContext
+    ) -> GeneratorResult:
+        """Generate video using fal.ai Sora 2 model."""
+        # Check for API key
+        if not os.getenv("FAL_KEY"):
+            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
+
+        # Import fal_client
+        try:
+            import fal_client
+        except ImportError as e:
+            raise ImportError(
+                "fal.ai SDK is required for FalSora2TextToVideoGenerator. "
+                "Install with: pip install weirdfingers-boards[generators-fal]"
+            ) from e
+
+        # Prepare arguments for fal.ai API
+        arguments = {
+            "prompt": inputs.prompt,
+            "resolution": inputs.resolution,
+            "aspect_ratio": inputs.aspect_ratio,
+            "duration": inputs.duration,
+        }
+
+        # Submit async job
+        handler = await fal_client.submit_async(
+            "fal-ai/sora-2/text-to-video",
+            arguments=arguments,
+        )
+
+        # Store external job ID
+        await context.set_external_job_id(handler.request_id)
+
+        # Stream progress updates
+        from .....progress.models import ProgressUpdate
+
+        event_count = 0
+        async for event in handler.iter_events(with_logs=True):
+            event_count += 1
+            # Sample every 3rd event to avoid spam
+            if event_count % 3 == 0:
+                # Extract logs if available
+                logs = getattr(event, "logs", None)
+                if logs:
+                    # Join log entries into a single message
+                    if isinstance(logs, list):
+                        message = " | ".join(str(log) for log in logs if log)
+                    else:
+                        message = str(logs)
+
+                    if message:
+                        await context.publish_progress(
+                            ProgressUpdate(
+                                job_id=handler.request_id,
+                                status="processing",
+                                progress=50.0,  # Approximate mid-point progress
+                                phase="processing",
+                                message=message,
+                            )
+                        )
+
+        # Get final result
+        result = await handler.get()
+
+        # Extract video from result
+        # fal.ai returns: {"video": {"url": "...", "content_type": "video/mp4",
+        #                  "width": ..., "height": ..., "duration": ..., "fps": ...}}
+        video_data = result.get("video")
+        if not video_data:
+            raise ValueError("No video returned from fal.ai API")
+
+        video_url = video_data.get("url")
+        if not video_url:
+            raise ValueError("Video missing URL in fal.ai response")
+
+        # Extract video metadata from response or use defaults
+        width = video_data.get("width")
+        height = video_data.get("height")
+        duration = video_data.get("duration")
+        fps = video_data.get("fps")
+
+        # If dimensions not provided, determine based on aspect ratio and resolution
+        if width is None or height is None:
+            # 720p dimensions
+            aspect_ratio_dimensions = {
+                "16:9": (1280, 720),
+                "9:16": (720, 1280),
+            }
+            width, height = aspect_ratio_dimensions.get(inputs.aspect_ratio, (1280, 720))
+
+        # Store video result
+        artifact = await context.store_video_result(
+            storage_url=video_url,
+            format="mp4",
+            width=width,
+            height=height,
+            duration=float(duration) if duration else float(inputs.duration),
+            fps=fps,
+            output_index=0,
+        )
+
+        return GeneratorResult(outputs=[artifact])
+
+    async def estimate_cost(self, inputs: Sora2TextToVideoInput) -> float:
+        """Estimate cost for Sora 2 generation.
+
+        Pricing information not provided in official documentation.
+        Estimated at $0.20 per video based on typical high-quality video generation costs.
+        Cost scales with duration.
+        """
+        # Approximate cost per video - Sora 2 is likely higher cost due to quality
+        base_cost = 0.20
+        # Scale by duration: 4s = 1x, 8s = 2x, 12s = 3x
+        duration_multiplier = inputs.duration / 4
+        return base_cost * duration_multiplier
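A quick concrete check of the duration scaling in estimate_cost above, using only fields shown in this diff (prompt is the sole required input). This is a hedged sketch: it assumes the template package is importable from its src/boards layout and that the generator can be instantiated directly; the dollar figures are the template's own placeholder estimates, not fal.ai pricing.

    import asyncio

    from boards.generators.implementations.fal.video.fal_sora_2_text_to_video import (
        FalSora2TextToVideoGenerator,
        Sora2TextToVideoInput,
    )

    async def main() -> None:
        gen = FalSora2TextToVideoGenerator()
        for seconds in (4, 8, 12):
            inputs = Sora2TextToVideoInput(prompt="a red fox trotting through snow", duration=seconds)
            print(f"{seconds}s -> ${await gen.estimate_cost(inputs):.2f}")
        # 4s -> $0.20, 8s -> $0.40, 12s -> $0.60

    asyncio.run(main())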
package/templates/api/src/boards/generators/implementations/fal/video/infinitalk.py
@@ -0,0 +1,221 @@
+"""
+fal.ai infinitalk video generator.
+
+Generates talking avatar videos from an image and audio file. The avatar
+lip-syncs to the provided audio with natural facial expressions.
+
+Based on Fal AI's fal-ai/infinitalk model.
+See: https://fal.ai/models/fal-ai/infinitalk
+"""
+
+import os
+from typing import Literal
+
+from pydantic import BaseModel, Field
+
+from ....artifacts import AudioArtifact, ImageArtifact
+from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+
+
+class InfinitalkInput(BaseModel):
+    """Input schema for infinitalk.
+
+    Artifact fields are automatically detected via type introspection
+    and resolved from generation IDs to artifact objects.
+    """
+
+    image: ImageArtifact = Field(
+        description=(
+            "Input image for the avatar. "
+            "If the aspect ratio doesn't match, it is resized and center cropped"
+        )
+    )
+    audio: AudioArtifact = Field(description="Audio file to synchronize with the avatar")
+    prompt: str = Field(description="Text prompt to guide video generation")
+    num_frames: int = Field(
+        default=145,
+        ge=41,
+        le=721,
+        description="Number of frames to generate",
+    )
+    resolution: Literal["480p", "720p"] = Field(
+        default="480p",
+        description="Output video resolution",
+    )
+    acceleration: Literal["none", "regular", "high"] = Field(
+        default="regular",
+        description="Acceleration level for generation speed",
+    )
+    seed: int = Field(
+        default=42,
+        description="Seed for reproducibility",
+    )
+
+
+class FalInfinitalkGenerator(BaseGenerator):
+    """Generator for talking avatar videos from image and audio."""
+
+    name = "fal-infinitalk"
+    description = "Fal: infinitalk - Generate talking avatar video from image and audio"
+    artifact_type = "video"
+
+    def get_input_schema(self) -> type[InfinitalkInput]:
+        """Return the input schema for this generator."""
+        return InfinitalkInput
+
+    async def generate(
+        self, inputs: InfinitalkInput, context: GeneratorExecutionContext
+    ) -> GeneratorResult:
+        """Generate talking avatar video using fal.ai infinitalk."""
+        # Check for API key
+        if not os.getenv("FAL_KEY"):
+            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
+
+        # Import fal_client
+        try:
+            import fal_client
+        except ImportError as e:
+            raise ImportError(
+                "fal.ai SDK is required for FalInfinitalkGenerator. "
+                "Install with: pip install weirdfingers-boards[generators-fal]"
+            ) from e
+
+        # Upload image and audio artifacts to Fal's public storage
+        # Fal API requires publicly accessible URLs
+        from ..utils import upload_artifacts_to_fal
+
+        # Upload image and audio separately
+        image_urls = await upload_artifacts_to_fal([inputs.image], context)
+        audio_urls = await upload_artifacts_to_fal([inputs.audio], context)
+
+        # Prepare arguments for fal.ai API
+        arguments = {
+            "image_url": image_urls[0],
+            "audio_url": audio_urls[0],
+            "prompt": inputs.prompt,
+            "num_frames": inputs.num_frames,
+            "resolution": inputs.resolution,
+            "acceleration": inputs.acceleration,
+            "seed": inputs.seed,
+        }
+
+        # Submit async job
+        handler = await fal_client.submit_async(
+            "fal-ai/infinitalk",
+            arguments=arguments,
+        )
+
+        # Store external job ID
+        await context.set_external_job_id(handler.request_id)
+
+        # Stream progress updates
+        from .....progress.models import ProgressUpdate
+
+        event_count = 0
+        async for event in handler.iter_events(with_logs=True):
+            event_count += 1
+            # Sample every 3rd event to avoid spam
+            if event_count % 3 == 0:
+                # Extract logs if available
+                logs = getattr(event, "logs", None)
+                if logs:
+                    # Join log entries into a single message
+                    if isinstance(logs, list):
+                        message = " | ".join(str(log) for log in logs if log)
+                    else:
+                        message = str(logs)
+
+                    if message:
+                        await context.publish_progress(
+                            ProgressUpdate(
+                                job_id=handler.request_id,
+                                status="processing",
+                                progress=50.0,  # Approximate mid-point progress
+                                phase="processing",
+                                message=message,
+                            )
+                        )
+
+        # Get final result
+        result = await handler.get()
+
+        # Extract video from result
+        # fal.ai returns: {"video": {"url": "...", "content_type": "video/mp4", ...}, "seed": 42}
+        video_data = result.get("video")
+
+        if not video_data:
+            raise ValueError("No video returned from fal.ai API")
+
+        video_url = video_data.get("url")
+        if not video_url:
+            raise ValueError("Video missing URL in fal.ai response")
+
+        # Extract format from content_type (e.g., "video/mp4" -> "mp4")
+        # Infinitalk always produces MP4 videos, so default to mp4
+        content_type = video_data.get("content_type", "video/mp4")
+        if content_type.startswith("video/"):
+            video_format = content_type.split("/")[-1]
+        else:
+            # If content_type is not a video mime type (e.g., application/octet-stream),
+            # default to mp4 since infinitalk only produces mp4 videos
+            video_format = "mp4"
+
+        # Store the video result
+        # Use input image dimensions and audio duration for metadata
+        # Estimate FPS based on num_frames and audio duration
+        fps = 30.0  # Default FPS
+        if inputs.audio.duration and inputs.audio.duration > 0:
+            fps = inputs.num_frames / inputs.audio.duration
+
+        # Parse resolution to get dimensions
+        width, height = self._parse_resolution(inputs.resolution)
+
+        artifact = await context.store_video_result(
+            storage_url=video_url,
+            format=video_format,
+            width=width,
+            height=height,
+            duration=inputs.audio.duration or inputs.num_frames / fps,  # fall back if audio duration is unknown
+            fps=int(fps),
+            output_index=0,
+        )
+
+        return GeneratorResult(outputs=[artifact])
+
+    def _parse_resolution(self, resolution: str) -> tuple[int, int]:
+        """Parse resolution string to width and height.
+
+        Args:
+            resolution: Resolution string like "480p" or "720p"
+
+        Returns:
+            Tuple of (width, height)
+        """
+        if resolution == "480p":
+            return (854, 480)
+        elif resolution == "720p":
+            return (1280, 720)
+        else:
+            # Default to 480p
+            return (854, 480)
+
+    async def estimate_cost(self, inputs: InfinitalkInput) -> float:
+        """Estimate cost for infinitalk generation in USD.
+
+        Pricing is not specified in the documentation; this estimate is
+        based on typical video generation costs. Higher resolution and
+        more frames may increase cost.
+        """
+        # Base cost estimate per generation
+        base_cost = 0.10
+
+        # Adjust for resolution
+        if inputs.resolution == "720p":
+            base_cost *= 1.5
+
+        # Adjust for frame count (more frames = higher cost)
+        # Base estimate is for 145 frames
+        frame_multiplier = inputs.num_frames / 145.0
+        base_cost *= frame_multiplier
+
+        return base_cost
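A note on the metadata fallbacks above: because infinitalk's clip length is driven by the audio track, the module derives fps as num_frames / audio.duration. With the default 145 frames and, say, a 5-second audio clip, fps = 145 / 5.0 = 29.0, stored as int(29.0) = 29. If the audio artifact reports no duration, fps keeps its 30.0 default and the stored duration falls back to 145 / 30.0 ≈ 4.8 seconds.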
package/templates/api/src/boards/generators/implementations/fal/video/kling_video_v2_5_turbo_pro_image_to_video.py
@@ -0,0 +1,175 @@
+"""
+Kling v2.5 Turbo Pro image-to-video generator.
+
+Top-tier image-to-video generation with unparalleled motion fluidity, cinematic visuals,
+and exceptional prompt precision using Kling's v2.5 Turbo Pro model.
+
+Based on Fal AI's fal-ai/kling-video/v2.5-turbo/pro/image-to-video model.
+See: https://fal.ai/models/fal-ai/kling-video/v2.5-turbo/pro/image-to-video
+"""
+
+import os
+from typing import Literal
+
+from pydantic import BaseModel, Field
+
+from ....artifacts import ImageArtifact
+from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+
+
+class KlingVideoV25TurboProImageToVideoInput(BaseModel):
+    """Input schema for Kling v2.5 Turbo Pro image-to-video generation.
+
+    Artifact fields (image_url) are automatically detected via type introspection
+    and resolved from generation IDs to ImageArtifact objects.
+    """
+
+    prompt: str = Field(
+        description="Text description of desired video content",
+        max_length=2500,
+    )
+    image_url: ImageArtifact = Field(
+        description="Source image for animation",
+    )
+    duration: Literal["5", "10"] = Field(
+        default="5",
+        description="Video length in seconds",
+    )
+    negative_prompt: str = Field(
+        default="blur, distort, and low quality",
+        description="Elements to exclude from output",
+        max_length=2500,
+    )
+    cfg_scale: float = Field(
+        default=0.5,
+        ge=0.0,
+        le=1.0,
+        description="Guidance strength controlling prompt adherence (0-1)",
+    )
+
+
+class FalKlingVideoV25TurboProImageToVideoGenerator(BaseGenerator):
+    """Generator for image-to-video using Kling v2.5 Turbo Pro."""
+
+    name = "fal-kling-video-v2-5-turbo-pro-image-to-video"
+    description = (
+        "Fal: Kling v2.5 Turbo Pro - top-tier image-to-video generation with cinematic visuals"
+    )
+    artifact_type = "video"
+
+    def get_input_schema(self) -> type[KlingVideoV25TurboProImageToVideoInput]:
+        """Return the input schema for this generator."""
+        return KlingVideoV25TurboProImageToVideoInput
+
+    async def generate(
+        self, inputs: KlingVideoV25TurboProImageToVideoInput, context: GeneratorExecutionContext
+    ) -> GeneratorResult:
+        """Generate video using fal.ai Kling v2.5 Turbo Pro image-to-video model."""
+        # Check for API key
+        if not os.getenv("FAL_KEY"):
+            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
+
+        # Import fal_client
+        try:
+            import fal_client
+        except ImportError as e:
+            raise ImportError(
+                "fal.ai SDK is required for FalKlingVideoV25TurboProImageToVideoGenerator. "
+                "Install with: pip install weirdfingers-boards[generators-fal]"
+            ) from e
+
+        # Upload image artifact to Fal's public storage
+        # Fal API requires publicly accessible URLs
+        from ..utils import upload_artifacts_to_fal
+
+        image_urls = await upload_artifacts_to_fal([inputs.image_url], context)
+
+        # Prepare arguments for fal.ai API
+        arguments = {
+            "prompt": inputs.prompt,
+            "image_url": image_urls[0],
+            "duration": inputs.duration,
+            "negative_prompt": inputs.negative_prompt,
+            "cfg_scale": inputs.cfg_scale,
+        }
+
+        # Submit async job
+        handler = await fal_client.submit_async(
+            "fal-ai/kling-video/v2.5-turbo/pro/image-to-video",
+            arguments=arguments,
+        )
+
+        # Store external job ID
+        await context.set_external_job_id(handler.request_id)
+
+        # Stream progress updates
+        from .....progress.models import ProgressUpdate
+
+        event_count = 0
+        async for event in handler.iter_events(with_logs=True):
+            event_count += 1
+            # Sample every 3rd event to avoid spam
+            if event_count % 3 == 0:
+                # Extract logs if available
+                logs = getattr(event, "logs", None)
+                if logs:
+                    # Join log entries into a single message
+                    if isinstance(logs, list):
+                        message = " | ".join(str(log) for log in logs if log)
+                    else:
+                        message = str(logs)
+
+                    if message:
+                        await context.publish_progress(
+                            ProgressUpdate(
+                                job_id=handler.request_id,
+                                status="processing",
+                                progress=50.0,  # Approximate mid-point progress
+                                phase="processing",
+                                message=message,
+                            )
+                        )
+
+        # Get final result
+        result = await handler.get()
+
+        # Extract video from result
+        # fal.ai returns: {"video": {"url": "...", "content_type": "video/mp4", ...}}
+        video_data = result.get("video")
+        if not video_data:
+            raise ValueError("No video returned from fal.ai API")
+
+        video_url = video_data.get("url")
+        if not video_url:
+            raise ValueError("Video missing URL in fal.ai response")
+
+        # Determine video dimensions based on input image
+        # Kling maintains the aspect ratio of the input image
+        # Use input image dimensions as reference
+        width = inputs.image_url.width
+        height = inputs.image_url.height
+
+        # Store video result
+        artifact = await context.store_video_result(
+            storage_url=video_url,
+            format="mp4",
+            width=width,
+            height=height,
+            duration=float(inputs.duration),  # Convert "5" or "10" to float
+            output_index=0,
+        )
+
+        return GeneratorResult(outputs=[artifact])
+
+    async def estimate_cost(self, inputs: KlingVideoV25TurboProImageToVideoInput) -> float:
+        """Estimate cost for Kling v2.5 Turbo Pro image-to-video generation.
+
+        Pricing information not provided in official documentation.
+        Estimated at $0.15 per video based on typical video generation costs.
+        Cost may vary based on duration and quality settings.
+        """
+        # Approximate cost per video
+        # 10-second videos may cost more than 5-second videos
+        base_cost = 0.15
+        duration_multiplier = 2.0 if inputs.duration == "10" else 1.0
+        return base_cost * duration_multiplier
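As with the other estimators in this release, the Kling figures are placeholder estimates rather than published pricing: a "5"-second clip is estimated at the $0.15 base, and a "10"-second clip at 0.15 × 2.0 = $0.30.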