@weirdfingers/baseboards 0.2.1 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. package/README.md +14 -4
  2. package/dist/index.js +13 -4
  3. package/dist/index.js.map +1 -1
  4. package/package.json +1 -1
  5. package/templates/api/ARTIFACT_RESOLUTION_GUIDE.md +148 -0
  6. package/templates/api/Dockerfile +2 -2
  7. package/templates/api/README.md +138 -6
  8. package/templates/api/config/generators.yaml +41 -7
  9. package/templates/api/docs/TESTING_LIVE_APIS.md +417 -0
  10. package/templates/api/pyproject.toml +49 -9
  11. package/templates/api/src/boards/__init__.py +1 -1
  12. package/templates/api/src/boards/auth/adapters/__init__.py +9 -2
  13. package/templates/api/src/boards/auth/factory.py +16 -2
  14. package/templates/api/src/boards/generators/__init__.py +2 -2
  15. package/templates/api/src/boards/generators/artifact_resolution.py +372 -0
  16. package/templates/api/src/boards/generators/artifacts.py +4 -4
  17. package/templates/api/src/boards/generators/base.py +8 -4
  18. package/templates/api/src/boards/generators/implementations/__init__.py +4 -2
  19. package/templates/api/src/boards/generators/implementations/fal/__init__.py +25 -0
  20. package/templates/api/src/boards/generators/implementations/fal/audio/__init__.py +4 -0
  21. package/templates/api/src/boards/generators/implementations/fal/audio/minimax_music_v2.py +173 -0
  22. package/templates/api/src/boards/generators/implementations/fal/audio/minimax_speech_2_6_turbo.py +221 -0
  23. package/templates/api/src/boards/generators/implementations/fal/image/__init__.py +17 -0
  24. package/templates/api/src/boards/generators/implementations/fal/image/flux_pro_kontext.py +216 -0
  25. package/templates/api/src/boards/generators/implementations/fal/image/flux_pro_ultra.py +197 -0
  26. package/templates/api/src/boards/generators/implementations/fal/image/imagen4_preview.py +191 -0
  27. package/templates/api/src/boards/generators/implementations/fal/image/imagen4_preview_fast.py +179 -0
  28. package/templates/api/src/boards/generators/implementations/fal/image/nano_banana.py +183 -0
  29. package/templates/api/src/boards/generators/implementations/fal/image/nano_banana_edit.py +212 -0
  30. package/templates/api/src/boards/generators/implementations/fal/utils.py +61 -0
  31. package/templates/api/src/boards/generators/implementations/fal/video/__init__.py +13 -0
  32. package/templates/api/src/boards/generators/implementations/fal/video/kling_video_v2_5_turbo_pro_text_to_video.py +168 -0
  33. package/templates/api/src/boards/generators/implementations/fal/video/sync_lipsync_v2.py +167 -0
  34. package/templates/api/src/boards/generators/implementations/fal/video/veo31_first_last_frame_to_video.py +180 -0
  35. package/templates/api/src/boards/generators/implementations/openai/__init__.py +1 -0
  36. package/templates/api/src/boards/generators/implementations/openai/audio/__init__.py +1 -0
  37. package/templates/api/src/boards/generators/implementations/{audio → openai/audio}/whisper.py +9 -6
  38. package/templates/api/src/boards/generators/implementations/openai/image/__init__.py +1 -0
  39. package/templates/api/src/boards/generators/implementations/{image → openai/image}/dalle3.py +8 -5
  40. package/templates/api/src/boards/generators/implementations/replicate/__init__.py +1 -0
  41. package/templates/api/src/boards/generators/implementations/replicate/image/__init__.py +1 -0
  42. package/templates/api/src/boards/generators/implementations/{image → replicate/image}/flux_pro.py +8 -5
  43. package/templates/api/src/boards/generators/implementations/replicate/video/__init__.py +1 -0
  44. package/templates/api/src/boards/generators/implementations/{video → replicate/video}/lipsync.py +9 -6
  45. package/templates/api/src/boards/generators/resolution.py +80 -20
  46. package/templates/api/src/boards/jobs/repository.py +49 -0
  47. package/templates/api/src/boards/storage/factory.py +16 -6
  48. package/templates/api/src/boards/workers/actors.py +69 -5
  49. package/templates/api/src/boards/workers/context.py +177 -21
  50. package/templates/web/package.json +2 -1
  51. package/templates/web/src/components/boards/GenerationInput.tsx +154 -52
  52. package/templates/web/src/components/boards/GeneratorSelector.tsx +57 -59
  53. package/templates/web/src/components/ui/dropdown-menu.tsx +200 -0
  54. package/templates/api/src/boards/generators/implementations/audio/__init__.py +0 -3
  55. package/templates/api/src/boards/generators/implementations/image/__init__.py +0 -3
  56. package/templates/api/src/boards/generators/implementations/video/__init__.py +0 -3
package/templates/api/src/boards/generators/implementations/fal/image/nano_banana.py
@@ -0,0 +1,183 @@
+"""
+fal.ai nano-banana text-to-image generator.
+
+Fast image generation using fal.ai's nano-banana model with support for batch outputs.
+"""
+
+import os
+from typing import Literal
+
+from pydantic import BaseModel, Field
+
+from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+
+
+class NanoBananaInput(BaseModel):
+    """Input schema for nano-banana image generation."""
+
+    prompt: str = Field(description="Text prompt for image generation")
+    image_size: Literal[
+        "square_hd",
+        "square",
+        "portrait_4_3",
+        "portrait_16_9",
+        "landscape_4_3",
+        "landscape_16_9",
+    ] = Field(
+        default="landscape_4_3",
+        description="Image aspect ratio and resolution",
+    )
+    num_inference_steps: int = Field(
+        default=4,
+        ge=1,
+        le=50,
+        description="Number of inference steps (more steps = higher quality but slower)",
+    )
+    guidance_scale: float = Field(
+        default=3.5,
+        ge=1.0,
+        le=20.0,
+        description="Guidance scale for prompt adherence",
+    )
+    num_images: int = Field(
+        default=1,
+        ge=1,
+        le=10,
+        description="Number of images to generate in batch",
+    )
+    enable_safety_checker: bool = Field(
+        default=True,
+        description="Enable safety checker to filter unsafe content",
+    )
+    seed: int | None = Field(
+        default=None,
+        description="Random seed for reproducibility (optional)",
+    )
+    sync_mode: bool = Field(
+        default=True,
+        description="Use synchronous mode (wait for completion)",
+    )
+    output_format: Literal["jpeg", "png"] = Field(
+        default="jpeg",
+        description="Output image format",
+    )
+
+
+class FalNanoBananaGenerator(BaseGenerator):
+    """nano-banana image generator using fal.ai."""
+
+    name = "fal-nano-banana"
+    artifact_type = "image"
+    description = "Fal: nano-banana - fast text-to-image generation with batch support"
+
+    def get_input_schema(self) -> type[NanoBananaInput]:
+        return NanoBananaInput
+
+    async def generate(
+        self, inputs: NanoBananaInput, context: GeneratorExecutionContext
+    ) -> GeneratorResult:
+        """Generate images using fal.ai nano-banana model."""
+        # Check for API key (fal-client uses FAL_KEY environment variable)
+        if not os.getenv("FAL_KEY"):
+            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
+
+        # Import fal_client
+        try:
+            import fal_client
+        except ImportError as e:
+            raise ImportError(
+                "fal.ai SDK is required for NanoBananaGenerator. "
+                "Install with: pip install weirdfingers-boards[generators-fal]"
+            ) from e
+
+        # Prepare arguments for fal.ai API
+        arguments = {
+            "prompt": inputs.prompt,
+            "image_size": inputs.image_size,
+            "num_inference_steps": inputs.num_inference_steps,
+            "guidance_scale": inputs.guidance_scale,
+            "num_images": inputs.num_images,
+            "enable_safety_checker": inputs.enable_safety_checker,
+            "sync_mode": inputs.sync_mode,
+            "output_format": inputs.output_format,
+        }
+
+        # Add seed if provided
+        if inputs.seed is not None:
+            arguments["seed"] = inputs.seed
+
+        # Submit async job and get handler
+        handler = await fal_client.submit_async(
+            "fal-ai/nano-banana",
+            arguments=arguments,
+        )
+
+        # Store the external job ID for tracking
+        await context.set_external_job_id(handler.request_id)
+
+        # Stream progress updates (sample every 3rd event to avoid spam)
+        from .....progress.models import ProgressUpdate
+
+        event_count = 0
+        async for event in handler.iter_events(with_logs=True):
+            event_count += 1
+
+            # Process every 3rd event to provide feedback without overwhelming
+            if event_count % 3 == 0:
+                # Extract logs if available
+                logs = getattr(event, "logs", None)
+                if logs:
+                    # Join log entries into a single message
+                    if isinstance(logs, list):
+                        message = " | ".join(str(log) for log in logs if log)
+                    else:
+                        message = str(logs)
+
+                    if message:
+                        await context.publish_progress(
+                            ProgressUpdate(
+                                job_id=handler.request_id,
+                                status="processing",
+                                progress=50.0,  # Approximate mid-point progress
+                                phase="processing",
+                                message=message,
+                            )
+                        )
+
+        # Get final result
+        result = await handler.get()
+
+        # Extract image URLs from result
+        # fal.ai returns: {"images": [{"url": "...", "width": ..., "height": ...}, ...]}
+        images = result.get("images", [])
+        if not images:
+            raise ValueError("No images returned from fal.ai API")
+
+        # Store each image using output_index
+        artifacts = []
+        for idx, image_data in enumerate(images):
+            image_url = image_data.get("url")
+            width = image_data.get("width")
+            height = image_data.get("height")
+
+            if not image_url:
+                raise ValueError(f"Image {idx} missing URL in fal.ai response")
+
+            # Store with appropriate output_index
+            artifact = await context.store_image_result(
+                storage_url=image_url,
+                format=inputs.output_format,
+                width=width,
+                height=height,
+                output_index=idx,
+            )
+            artifacts.append(artifact)
+
+        return GeneratorResult(outputs=artifacts)
+
+    async def estimate_cost(self, inputs: NanoBananaInput) -> float:
+        """Estimate cost for nano-banana generation.
+
+        nano-banana typically costs around $0.003 per image.
+        """
+        return 0.003 * inputs.num_images  # $0.003 per image, scaled by batch size
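A quick way to exercise the new schema is to instantiate it directly. The sketch below relies only on the NanoBananaInput model added in this file and standard pydantic v2 behavior; the absolute import path is an assumption based on the template's src/boards layout.

    from pydantic import ValidationError

    from boards.generators.implementations.fal.image.nano_banana import NanoBananaInput

    # Only the prompt is required; every other field falls back to the defaults above.
    inputs = NanoBananaInput(prompt="a watercolor fox in the rain", num_images=2)
    print(inputs.image_size)           # "landscape_4_3"
    print(inputs.num_inference_steps)  # 4

    # Constraint violations (e.g. num_images > 10) are rejected by the Field bounds.
    try:
        NanoBananaInput(prompt="too many", num_images=11)
    except ValidationError as exc:
        print(exc.error_count(), "validation error")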
package/templates/api/src/boards/generators/implementations/fal/image/nano_banana_edit.py
@@ -0,0 +1,212 @@
+"""
+fal.ai nano-banana image-to-image editing generator.
+
+Edit images using fal.ai's nano-banana/edit model (powered by Gemini).
+Supports editing multiple input images with a text prompt.
+"""
+
+import os
+from typing import Literal
+
+from pydantic import BaseModel, Field
+
+from ....artifacts import ImageArtifact
+from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+
+
+class NanoBananaEditInput(BaseModel):
+    """Input schema for nano-banana image editing.
+
+    Artifact fields (like image_sources) are automatically detected via type
+    introspection and resolved from generation IDs to ImageArtifact objects.
+    """
+
+    prompt: str = Field(description="The prompt for image editing")
+    image_sources: list[ImageArtifact] = Field(
+        description="List of input images for editing (from previous generations)",
+        min_length=1,
+    )
+    num_images: int = Field(
+        default=1,
+        ge=1,
+        le=10,
+        description="Number of images to generate",
+    )
+    output_format: Literal["jpeg", "png", "webp"] = Field(
+        default="jpeg",
+        description="Output image format",
+    )
+    sync_mode: bool = Field(
+        default=False,
+        description=(
+            "If True, the media will be returned as a data URI and the output "
+            "data won't be available in the request history"
+        ),
+    )
+    limit_generations: bool = Field(
+        default=False,
+        description=(
+            "Experimental parameter to limit the number of generations from each "
+            "round of prompting to 1. Set to True to disregard any instructions in "
+            "the prompt regarding the number of images to generate"
+        ),
+    )
+    aspect_ratio: (
+        Literal[
+            "21:9",
+            "1:1",
+            "4:3",
+            "3:2",
+            "2:3",
+            "5:4",
+            "4:5",
+            "3:4",
+            "16:9",
+            "9:16",
+        ]
+        | None
+    ) = Field(
+        default=None,
+        description=(
+            "Aspect ratio for generated images. Default is None, which takes one "
+            "of the input images' aspect ratio"
+        ),
+    )
+
+
+class FalNanoBananaEditGenerator(BaseGenerator):
+    """nano-banana image editing generator using fal.ai."""
+
+    name = "fal-nano-banana-edit"
+    artifact_type = "image"
+    description = "Fal: nano-banana edit - AI-powered image editing with Gemini"
+
+    def get_input_schema(self) -> type[NanoBananaEditInput]:
+        return NanoBananaEditInput
+
+    async def generate(
+        self, inputs: NanoBananaEditInput, context: GeneratorExecutionContext
+    ) -> GeneratorResult:
+        """Edit images using fal.ai nano-banana/edit model."""
+        # Check for API key (fal-client uses FAL_KEY environment variable)
+        if not os.getenv("FAL_KEY"):
+            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
+
+        # Import fal_client
+        try:
+            import fal_client
+        except ImportError as e:
+            raise ImportError(
+                "fal.ai SDK is required for FalNanoBananaEditGenerator. "
+                "Install with: pip install weirdfingers-boards[generators-fal]"
+            ) from e
+
+        # Upload image artifacts to Fal's public storage
+        # Fal API requires publicly accessible URLs, but our storage_url might be:
+        # - Localhost URLs (not publicly accessible)
+        # - Private S3 buckets (not publicly accessible)
+        # So we upload to Fal's temporary storage first
+        from ..utils import upload_artifacts_to_fal
+
+        image_urls = await upload_artifacts_to_fal(inputs.image_sources, context)
+
+        # Prepare arguments for fal.ai API
+        arguments = {
+            "prompt": inputs.prompt,
+            "image_urls": image_urls,
+            "num_images": inputs.num_images,
+            "output_format": inputs.output_format,
+            "sync_mode": inputs.sync_mode,
+            "limit_generations": inputs.limit_generations,
+        }
+
+        # Add aspect_ratio if provided
+        if inputs.aspect_ratio is not None:
+            arguments["aspect_ratio"] = inputs.aspect_ratio
+
+        # Submit async job and get handler
+        handler = await fal_client.submit_async(
+            "fal-ai/nano-banana/edit",
+            arguments=arguments,
+        )
+
+        # Store the external job ID for tracking
+        await context.set_external_job_id(handler.request_id)
+
+        # Stream progress updates (sample every 3rd event to avoid spam)
+        from .....progress.models import ProgressUpdate
+
+        event_count = 0
+        async for event in handler.iter_events(with_logs=True):
+            event_count += 1
+
+            # Process every 3rd event to provide feedback without overwhelming
+            if event_count % 3 == 0:
+                # Extract logs if available
+                logs = getattr(event, "logs", None)
+                if logs:
+                    # Join log entries into a single message
+                    if isinstance(logs, list):
+                        message = " | ".join(str(log) for log in logs if log)
+                    else:
+                        message = str(logs)
+
+                    if message:
+                        await context.publish_progress(
+                            ProgressUpdate(
+                                job_id=handler.request_id,
+                                status="processing",
+                                progress=50.0,  # Approximate mid-point progress
+                                phase="processing",
+                                message=message,
+                            )
+                        )
+
+        # Get final result
+        result = await handler.get()
+
+        # Extract image URLs and description from result
+        # fal.ai returns: {
+        #     "images": [{"url": "...", ...}, ...],
+        #     "description": "Text description from Gemini"
+        # }
+        images = result.get("images", [])
+
+        if not images:
+            raise ValueError("No images returned from fal.ai API")
+
+        # Store each image using output_index
+        artifacts = []
+        for idx, image_data in enumerate(images):
+            image_url = image_data.get("url")
+            # Extract dimensions if available, otherwise use sensible defaults
+            width = image_data.get("width", 1024)
+            height = image_data.get("height", 1024)
+
+            if not image_url:
+                raise ValueError(f"Image {idx} missing URL in fal.ai response")
+
+            # Store with appropriate output_index
+            # Note: The Gemini description from the API response (result.get("description"))
+            # is not currently stored with the artifact. Consider extending ImageArtifact
+            # to support metadata in the future.
+            artifact = await context.store_image_result(
+                storage_url=image_url,
+                format=inputs.output_format,
+                width=width,
+                height=height,
+                output_index=idx,
+            )
+            artifacts.append(artifact)
+
+        return GeneratorResult(outputs=artifacts)
+
+    async def estimate_cost(self, inputs: NanoBananaEditInput) -> float:
+        """Estimate cost for nano-banana edit generation.
+
+        nano-banana/edit uses Gemini for image editing, which has variable costs
+        depending on input complexity. Using a conservative estimate.
+        """
+        # Base cost per edit operation + per-image multiplier
+        per_image_cost = 0.039
+        return per_image_cost * inputs.num_images
package/templates/api/src/boards/generators/implementations/fal/utils.py
@@ -0,0 +1,61 @@
+"""
+Shared utilities for Fal.ai generators.
+
+Provides helper functions for common operations across Fal generators.
+"""
+
+import asyncio
+
+from ...artifacts import AudioArtifact, DigitalArtifact, ImageArtifact, VideoArtifact
+from ...base import GeneratorExecutionContext
+
+
+async def upload_artifacts_to_fal[T: DigitalArtifact](
+    artifacts: list[ImageArtifact] | list[VideoArtifact] | list[AudioArtifact] | list[T],
+    context: GeneratorExecutionContext,
+) -> list[str]:
+    """
+    Upload artifacts to Fal's temporary storage for use in API requests.
+
+    Fal API endpoints require publicly accessible URLs for file inputs. Since our
+    storage URLs might be local or private (localhost, private S3 buckets, etc.),
+    we need to:
+    1. Resolve each artifact to a local file path
+    2. Upload to Fal's public temporary storage
+    3. Get back publicly accessible URLs
+
+    Args:
+        artifacts: List of artifacts (image, video, or audio) to upload
+        context: Generator execution context for artifact resolution
+
+    Returns:
+        List of publicly accessible URLs from Fal storage
+
+    Raises:
+        ImportError: If fal_client is not installed
+        Any exceptions from file resolution or upload are propagated
+    """
+    # Import fal_client
+    try:
+        import fal_client
+    except ImportError as e:
+        raise ImportError(
+            "fal.ai SDK is required for Fal generators. "
+            "Install with: pip install weirdfingers-boards[generators-fal]"
+        ) from e
+
+    async def upload_single_artifact(artifact: DigitalArtifact) -> str:
+        """Upload a single artifact and return its public URL."""
+        # Resolve artifact to local file path (downloads if needed)
+        file_path_str = await context.resolve_artifact(artifact)
+
+        # Upload to Fal's temporary storage and get public URL
+        # fal_client.upload_file_async expects a file path
+        url = await fal_client.upload_file_async(file_path_str)  # type: ignore[arg-type]
+
+        return url
+
+    # Upload all artifacts in parallel for performance
+    urls = await asyncio.gather(*[upload_single_artifact(artifact) for artifact in artifacts])
+
+    return list(urls)
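The docstring above describes a resolve-then-upload flow; in practice the edit generator earlier in this diff reduces it to a single awaited call before building the request payload. A minimal sketch of that pattern, assuming the template's src layout exposes these modules under boards.generators.* and using a hypothetical build_edit_arguments helper:

    from boards.generators.artifacts import ImageArtifact
    from boards.generators.base import GeneratorExecutionContext
    from boards.generators.implementations.fal.utils import upload_artifacts_to_fal

    async def build_edit_arguments(
        prompt: str,
        sources: list[ImageArtifact],
        context: GeneratorExecutionContext,
    ) -> dict:
        # Re-host local/private artifact URLs on Fal's temporary storage, then
        # assemble the payload the same way nano_banana_edit.py does.
        image_urls = await upload_artifacts_to_fal(sources, context)
        return {"prompt": prompt, "image_urls": image_urls}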
package/templates/api/src/boards/generators/implementations/fal/video/__init__.py
@@ -0,0 +1,13 @@
+"""Fal.ai video generators."""
+
+from .kling_video_v2_5_turbo_pro_text_to_video import (
+    FalKlingVideoV25TurboProTextToVideoGenerator,
+)
+from .sync_lipsync_v2 import FalSyncLipsyncV2Generator
+from .veo31_first_last_frame_to_video import FalVeo31FirstLastFrameToVideoGenerator
+
+__all__ = [
+    "FalKlingVideoV25TurboProTextToVideoGenerator",
+    "FalSyncLipsyncV2Generator",
+    "FalVeo31FirstLastFrameToVideoGenerator",
+]
package/templates/api/src/boards/generators/implementations/fal/video/kling_video_v2_5_turbo_pro_text_to_video.py
@@ -0,0 +1,168 @@
+"""
+Kling v2.5 Turbo Pro text-to-video generator.
+
+Top-tier text-to-video generation with unparalleled motion fluidity, cinematic visuals,
+and exceptional prompt precision using Kling's v2.5 Turbo Pro model.
+
+Based on Fal AI's fal-ai/kling-video/v2.5-turbo/pro/text-to-video model.
+See: https://fal.ai/models/fal-ai/kling-video/v2.5-turbo/pro/text-to-video
+"""
+
+import os
+from typing import Literal
+
+from pydantic import BaseModel, Field
+
+from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+
+
+class KlingVideoV25TurboProTextToVideoInput(BaseModel):
+    """Input schema for Kling v2.5 Turbo Pro text-to-video generation."""
+
+    prompt: str = Field(
+        description="Primary instruction for video generation",
+        max_length=2500,
+    )
+    duration: Literal["5", "10"] = Field(
+        default="5",
+        description="Video length in seconds",
+    )
+    aspect_ratio: Literal["16:9", "9:16", "1:1"] = Field(
+        default="16:9",
+        description="Frame dimensions",
+    )
+    negative_prompt: str = Field(
+        default="blur, distort, and low quality",
+        description="Elements to exclude from output",
+        max_length=2500,
+    )
+    cfg_scale: float = Field(
+        default=0.5,
+        ge=0.0,
+        le=1.0,
+        description="Guidance strength controlling prompt adherence (0-1)",
+    )
+
+
+class FalKlingVideoV25TurboProTextToVideoGenerator(BaseGenerator):
+    """Generator for text-to-video using Kling v2.5 Turbo Pro."""
+
+    name = "fal-kling-video-v2-5-turbo-pro-text-to-video"
+    description = (
+        "Fal: Kling v2.5 Turbo Pro - top-tier text-to-video generation with cinematic visuals"
+    )
+    artifact_type = "video"
+
+    def get_input_schema(self) -> type[KlingVideoV25TurboProTextToVideoInput]:
+        """Return the input schema for this generator."""
+        return KlingVideoV25TurboProTextToVideoInput
+
+    async def generate(
+        self, inputs: KlingVideoV25TurboProTextToVideoInput, context: GeneratorExecutionContext
+    ) -> GeneratorResult:
+        """Generate video using fal.ai Kling v2.5 Turbo Pro model."""
+        # Check for API key
+        if not os.getenv("FAL_KEY"):
+            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
+
+        # Import fal_client
+        try:
+            import fal_client
+        except ImportError as e:
+            raise ImportError(
+                "fal.ai SDK is required for FalKlingVideoV25TurboProTextToVideoGenerator. "
+                "Install with: pip install weirdfingers-boards[generators-fal]"
+            ) from e
+
+        # Prepare arguments for fal.ai API
+        arguments = {
+            "prompt": inputs.prompt,
+            "duration": inputs.duration,
+            "aspect_ratio": inputs.aspect_ratio,
+            "negative_prompt": inputs.negative_prompt,
+            "cfg_scale": inputs.cfg_scale,
+        }
+
+        # Submit async job
+        handler = await fal_client.submit_async(
+            "fal-ai/kling-video/v2.5-turbo/pro/text-to-video",
+            arguments=arguments,
+        )
+
+        # Store external job ID
+        await context.set_external_job_id(handler.request_id)
+
+        # Stream progress updates
+        from .....progress.models import ProgressUpdate
+
+        event_count = 0
+        async for event in handler.iter_events(with_logs=True):
+            event_count += 1
+            # Sample every 3rd event to avoid spam
+            if event_count % 3 == 0:
+                # Extract logs if available
+                logs = getattr(event, "logs", None)
+                if logs:
+                    # Join log entries into a single message
+                    if isinstance(logs, list):
+                        message = " | ".join(str(log) for log in logs if log)
+                    else:
+                        message = str(logs)
+
+                    if message:
+                        await context.publish_progress(
+                            ProgressUpdate(
+                                job_id=handler.request_id,
+                                status="processing",
+                                progress=50.0,  # Approximate mid-point progress
+                                phase="processing",
+                                message=message,
+                            )
+                        )
+
+        # Get final result
+        result = await handler.get()
+
+        # Extract video from result
+        # fal.ai returns: {"video": {"url": "...", "content_type": "video/mp4", ...}}
+        video_data = result.get("video")
+        if not video_data:
+            raise ValueError("No video returned from fal.ai API")
+
+        video_url = video_data.get("url")
+        if not video_url:
+            raise ValueError("Video missing URL in fal.ai response")
+
+        # Determine video dimensions based on aspect ratio
+        # Using HD quality resolutions
+        aspect_ratio_dimensions = {
+            "16:9": (1920, 1080),
+            "9:16": (1080, 1920),
+            "1:1": (1080, 1080),
+        }
+        width, height = aspect_ratio_dimensions.get(inputs.aspect_ratio, (1920, 1080))
+
+        # Store video result
+        artifact = await context.store_video_result(
+            storage_url=video_url,
+            format="mp4",
+            width=width,
+            height=height,
+            duration=float(inputs.duration),  # Convert "5" or "10" to float
+            output_index=0,
+        )
+
+        return GeneratorResult(outputs=[artifact])
+
+    async def estimate_cost(self, inputs: KlingVideoV25TurboProTextToVideoInput) -> float:
+        """Estimate cost for Kling v2.5 Turbo Pro generation.
+
+        Pricing information not provided in official documentation.
+        Estimated at $0.15 per video based on typical video generation costs.
+        Cost may vary based on duration and quality settings.
+        """
+        # Approximate cost per video
+        # 10-second videos may cost more than 5-second videos
+        base_cost = 0.15
+        duration_multiplier = 2.0 if inputs.duration == "10" else 1.0
+        return base_cost * duration_multiplier
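The estimate above is a heuristic rather than published fal.ai pricing: a $0.15 base, doubled for 10-second clips. A minimal sketch of the arithmetic, assuming the generator can be constructed without arguments (its constructor is not shown in this diff) and that the module is importable under boards.generators.implementations.fal.video:

    import asyncio

    from boards.generators.implementations.fal.video.kling_video_v2_5_turbo_pro_text_to_video import (
        FalKlingVideoV25TurboProTextToVideoGenerator,
        KlingVideoV25TurboProTextToVideoInput,
    )

    async def main() -> None:
        generator = FalKlingVideoV25TurboProTextToVideoGenerator()
        five_sec = KlingVideoV25TurboProTextToVideoInput(prompt="drone shot over a fjord")
        ten_sec = KlingVideoV25TurboProTextToVideoInput(
            prompt="drone shot over a fjord", duration="10"
        )
        print(await generator.estimate_cost(five_sec))  # 0.15 (base cost)
        print(await generator.estimate_cost(ten_sec))   # 0.30 (0.15 * 2.0)

    asyncio.run(main())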