@weirdfingers/baseboards 0.5.3 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. package/README.md +1 -1
  2. package/package.json +1 -1
  3. package/templates/api/alembic/env.py +9 -1
  4. package/templates/api/alembic/versions/20250101_000000_initial_schema.py +107 -49
  5. package/templates/api/alembic/versions/20251022_174729_remove_provider_name_from_generations.py +7 -3
  6. package/templates/api/alembic/versions/20251023_165852_switch_to_declarative_base_and_mapping.py +57 -1
  7. package/templates/api/alembic/versions/20251202_000000_add_artifact_lineage.py +134 -0
  8. package/templates/api/alembic/versions/2025925_62735_add_seed_data_for_default_tenant.py +8 -5
  9. package/templates/api/config/generators.yaml +111 -0
  10. package/templates/api/src/boards/__init__.py +1 -1
  11. package/templates/api/src/boards/api/app.py +2 -1
  12. package/templates/api/src/boards/api/endpoints/tenant_registration.py +1 -1
  13. package/templates/api/src/boards/api/endpoints/uploads.py +150 -0
  14. package/templates/api/src/boards/auth/factory.py +1 -1
  15. package/templates/api/src/boards/dbmodels/__init__.py +8 -22
  16. package/templates/api/src/boards/generators/artifact_resolution.py +45 -12
  17. package/templates/api/src/boards/generators/implementations/fal/audio/__init__.py +16 -1
  18. package/templates/api/src/boards/generators/implementations/fal/audio/beatoven_music_generation.py +171 -0
  19. package/templates/api/src/boards/generators/implementations/fal/audio/beatoven_sound_effect_generation.py +167 -0
  20. package/templates/api/src/boards/generators/implementations/fal/audio/elevenlabs_sound_effects_v2.py +194 -0
  21. package/templates/api/src/boards/generators/implementations/fal/audio/elevenlabs_tts_eleven_v3.py +209 -0
  22. package/templates/api/src/boards/generators/implementations/fal/audio/fal_elevenlabs_tts_turbo_v2_5.py +206 -0
  23. package/templates/api/src/boards/generators/implementations/fal/audio/fal_minimax_speech_26_hd.py +237 -0
  24. package/templates/api/src/boards/generators/implementations/fal/audio/minimax_speech_2_6_turbo.py +1 -1
  25. package/templates/api/src/boards/generators/implementations/fal/image/__init__.py +30 -0
  26. package/templates/api/src/boards/generators/implementations/fal/image/clarity_upscaler.py +220 -0
  27. package/templates/api/src/boards/generators/implementations/fal/image/crystal_upscaler.py +173 -0
  28. package/templates/api/src/boards/generators/implementations/fal/image/fal_ideogram_character.py +227 -0
  29. package/templates/api/src/boards/generators/implementations/fal/image/flux_2.py +203 -0
  30. package/templates/api/src/boards/generators/implementations/fal/image/flux_2_edit.py +230 -0
  31. package/templates/api/src/boards/generators/implementations/fal/image/flux_2_pro.py +204 -0
  32. package/templates/api/src/boards/generators/implementations/fal/image/flux_2_pro_edit.py +221 -0
  33. package/templates/api/src/boards/generators/implementations/fal/image/gemini_25_flash_image.py +177 -0
  34. package/templates/api/src/boards/generators/implementations/fal/image/gpt_image_1_edit_image.py +182 -0
  35. package/templates/api/src/boards/generators/implementations/fal/image/gpt_image_1_mini.py +167 -0
  36. package/templates/api/src/boards/generators/implementations/fal/image/ideogram_character_edit.py +299 -0
  37. package/templates/api/src/boards/generators/implementations/fal/image/ideogram_v2.py +190 -0
  38. package/templates/api/src/boards/generators/implementations/fal/image/nano_banana_pro_edit.py +226 -0
  39. package/templates/api/src/boards/generators/implementations/fal/image/qwen_image.py +249 -0
  40. package/templates/api/src/boards/generators/implementations/fal/image/qwen_image_edit.py +244 -0
  41. package/templates/api/src/boards/generators/implementations/fal/video/__init__.py +42 -0
  42. package/templates/api/src/boards/generators/implementations/fal/video/bytedance_seedance_v1_pro_text_to_video.py +209 -0
  43. package/templates/api/src/boards/generators/implementations/fal/video/creatify_lipsync.py +161 -0
  44. package/templates/api/src/boards/generators/implementations/fal/video/fal_bytedance_seedance_v1_pro_image_to_video.py +222 -0
  45. package/templates/api/src/boards/generators/implementations/fal/video/fal_minimax_hailuo_02_standard_text_to_video.py +152 -0
  46. package/templates/api/src/boards/generators/implementations/fal/video/fal_pixverse_lipsync.py +197 -0
  47. package/templates/api/src/boards/generators/implementations/fal/video/fal_sora_2_text_to_video.py +173 -0
  48. package/templates/api/src/boards/generators/implementations/fal/video/infinitalk.py +221 -0
  49. package/templates/api/src/boards/generators/implementations/fal/video/kling_video_v2_5_turbo_pro_image_to_video.py +175 -0
  50. package/templates/api/src/boards/generators/implementations/fal/video/minimax_hailuo_2_3_pro_image_to_video.py +153 -0
  51. package/templates/api/src/boards/generators/implementations/fal/video/sora2_image_to_video.py +172 -0
  52. package/templates/api/src/boards/generators/implementations/fal/video/sora_2_image_to_video_pro.py +175 -0
  53. package/templates/api/src/boards/generators/implementations/fal/video/sora_2_text_to_video_pro.py +163 -0
  54. package/templates/api/src/boards/generators/implementations/fal/video/sync_lipsync_v2_pro.py +155 -0
  55. package/templates/api/src/boards/generators/implementations/fal/video/veed_lipsync.py +174 -0
  56. package/templates/api/src/boards/generators/implementations/fal/video/veo3.py +194 -0
  57. package/templates/api/src/boards/generators/implementations/fal/video/veo31_first_last_frame_to_video.py +1 -1
  58. package/templates/api/src/boards/generators/implementations/fal/video/wan_pro_image_to_video.py +158 -0
  59. package/templates/api/src/boards/graphql/access_control.py +1 -1
  60. package/templates/api/src/boards/graphql/mutations/root.py +16 -4
  61. package/templates/api/src/boards/graphql/resolvers/board.py +0 -2
  62. package/templates/api/src/boards/graphql/resolvers/generation.py +10 -233
  63. package/templates/api/src/boards/graphql/resolvers/lineage.py +381 -0
  64. package/templates/api/src/boards/graphql/resolvers/upload.py +463 -0
  65. package/templates/api/src/boards/graphql/types/generation.py +62 -26
  66. package/templates/api/src/boards/middleware.py +1 -1
  67. package/templates/api/src/boards/storage/factory.py +2 -2
  68. package/templates/api/src/boards/tenant_isolation.py +9 -9
  69. package/templates/api/src/boards/workers/actors.py +10 -1
  70. package/templates/web/package.json +1 -1
  71. package/templates/web/src/app/boards/[boardId]/page.tsx +14 -5
  72. package/templates/web/src/app/lineage/[generationId]/page.tsx +233 -0
  73. package/templates/web/src/components/boards/ArtifactPreview.tsx +20 -1
  74. package/templates/web/src/components/boards/UploadArtifact.tsx +253 -0
package/templates/api/src/boards/generators/implementations/fal/image/qwen_image_edit.py
@@ -0,0 +1,244 @@
+ """
+ fal.ai Qwen image editing generator.
+
+ An image editing model specializing in text editing within images.
+ Based on Fal AI's qwen-image-edit model.
+ See: https://fal.ai/models/fal-ai/qwen-image-edit
+ """
+
+ import os
+ from typing import Literal
+
+ from pydantic import BaseModel, Field
+
+ from ....artifacts import ImageArtifact
+ from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+
+
+ class ImageSize(BaseModel):
+     """Custom image size with explicit width and height."""
+
+     width: int = Field(default=512, ge=1, le=14142, description="Image width in pixels")
+     height: int = Field(default=512, ge=1, le=14142, description="Image height in pixels")
+
+
+ class QwenImageEditInput(BaseModel):
+     """Input schema for Qwen image editing.
+
+     Artifact fields (like image_url) are automatically detected via type
+     introspection and resolved from generation IDs to ImageArtifact objects.
+     """
+
+     prompt: str = Field(description="Text guidance for image editing")
+     image_url: ImageArtifact = Field(description="Source image to be edited")
+     num_images: int = Field(
+         default=1,
+         ge=1,
+         le=4,
+         description="Number of edited images to generate",
+     )
+     image_size: (
+         Literal[
+             "square_hd",
+             "square",
+             "portrait_4_3",
+             "portrait_16_9",
+             "landscape_4_3",
+             "landscape_16_9",
+         ]
+         | ImageSize
+         | None
+     ) = Field(
+         default=None,
+         description=(
+             "Output image dimensions. Can be a preset (e.g., 'square_hd') or "
+             "custom dimensions with width/height"
+         ),
+     )
+     acceleration: Literal["none", "regular", "high"] = Field(
+         default="regular",
+         description="Speed optimization level",
+     )
+     output_format: Literal["jpeg", "png"] = Field(
+         default="png",
+         description="Output image format",
+     )
+     guidance_scale: float = Field(
+         default=4.0,
+         ge=0.0,
+         le=20.0,
+         description="CFG intensity controlling prompt adherence (0-20)",
+     )
+     num_inference_steps: int = Field(
+         default=30,
+         ge=2,
+         le=50,
+         description="Number of processing iterations for quality",
+     )
+     seed: int | None = Field(
+         default=None,
+         description="Random seed for reproducibility (optional)",
+     )
+     negative_prompt: str = Field(
+         default=" ",
+         description="Undesired characteristics to avoid in the edited image",
+     )
+     sync_mode: bool = Field(
+         default=False,
+         description=(
+             "If True, returns data URI instead of stored media "
+             "(output won't be available in request history)"
+         ),
+     )
+     enable_safety_checker: bool = Field(
+         default=True,
+         description="Enable NSFW content filtering",
+     )
+
+
+ class FalQwenImageEditGenerator(BaseGenerator):
+     """Qwen image editing generator using fal.ai."""
+
+     name = "fal-qwen-image-edit"
+     artifact_type = "image"
+     description = "Fal: Qwen Image Edit - AI-powered image editing with text editing capabilities"
+
+     def get_input_schema(self) -> type[QwenImageEditInput]:
+         return QwenImageEditInput
+
+     async def generate(
+         self, inputs: QwenImageEditInput, context: GeneratorExecutionContext
+     ) -> GeneratorResult:
+         """Edit images using fal.ai qwen-image-edit model."""
+         # Check for API key (fal-client uses FAL_KEY environment variable)
+         if not os.getenv("FAL_KEY"):
+             raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
+
+         # Import fal_client
+         try:
+             import fal_client
+         except ImportError as e:
+             raise ImportError(
+                 "fal.ai SDK is required for FalQwenImageEditGenerator. "
+                 "Install with: pip install weirdfingers-boards[generators-fal]"
+             ) from e
+
+         # Upload image artifact to Fal's public storage
+         # Fal API requires publicly accessible URLs
+         from ..utils import upload_artifacts_to_fal
+
+         image_urls = await upload_artifacts_to_fal([inputs.image_url], context)
+         image_url = image_urls[0]
+
+         # Prepare arguments for fal.ai API
+         arguments = {
+             "prompt": inputs.prompt,
+             "image_url": image_url,
+             "num_images": inputs.num_images,
+             "acceleration": inputs.acceleration,
+             "output_format": inputs.output_format,
+             "guidance_scale": inputs.guidance_scale,
+             "num_inference_steps": inputs.num_inference_steps,
+             "negative_prompt": inputs.negative_prompt,
+             "sync_mode": inputs.sync_mode,
+             "enable_safety_checker": inputs.enable_safety_checker,
+         }
+
+         # Add optional fields if provided
+         if inputs.image_size is not None:
+             # If ImageSize object, convert to dict; otherwise use string directly
+             if isinstance(inputs.image_size, ImageSize):
+                 arguments["image_size"] = {
+                     "width": inputs.image_size.width,
+                     "height": inputs.image_size.height,
+                 }
+             else:
+                 arguments["image_size"] = inputs.image_size
+
+         if inputs.seed is not None:
+             arguments["seed"] = inputs.seed
+
+         # Submit async job and get handler
+         handler = await fal_client.submit_async(
+             "fal-ai/qwen-image-edit",
+             arguments=arguments,
+         )
+
+         # Store the external job ID for tracking
+         await context.set_external_job_id(handler.request_id)
+
+         # Stream progress updates (sample every 3rd event to avoid spam)
+         from .....progress.models import ProgressUpdate
+
+         event_count = 0
+         async for event in handler.iter_events(with_logs=True):
+             event_count += 1
+
+             # Process every 3rd event to provide feedback without overwhelming
+             if event_count % 3 == 0:
+                 # Extract logs if available
+                 logs = getattr(event, "logs", None)
+                 if logs:
+                     # Join log entries into a single message
+                     if isinstance(logs, list):
+                         message = " | ".join(str(log) for log in logs if log)
+                     else:
+                         message = str(logs)
+
+                     if message:
+                         await context.publish_progress(
+                             ProgressUpdate(
+                                 job_id=handler.request_id,
+                                 status="processing",
+                                 progress=50.0,  # Approximate mid-point progress
+                                 phase="processing",
+                                 message=message,
+                             )
+                         )
+
+         # Get final result
+         result = await handler.get()
+
+         # Extract image URLs from result
+         # fal.ai returns: {
+         #     "images": [{"url": "...", "width": ..., "height": ..., "content_type": "..."}, ...],
+         #     "prompt": "...",
+         #     "seed": ...,
+         #     "has_nsfw_concepts": [...]
+         # }
+         images = result.get("images", [])
+
+         if not images:
+             raise ValueError("No images returned from fal.ai API")
+
+         # Store each image using output_index
+         artifacts = []
+         for idx, image_data in enumerate(images):
+             image_url_result = image_data.get("url")
+             # Extract dimensions from the response
+             width = image_data.get("width", 1024)
+             height = image_data.get("height", 1024)
+
+             if not image_url_result:
+                 raise ValueError(f"Image {idx} missing URL in fal.ai response")
+
+             # Store with appropriate output_index
+             artifact = await context.store_image_result(
+                 storage_url=image_url_result,
+                 format=inputs.output_format,
+                 width=width,
+                 height=height,
+                 output_index=idx,
+             )
+             artifacts.append(artifact)
+
+         return GeneratorResult(outputs=artifacts)
+
+     async def estimate_cost(self, inputs: QwenImageEditInput) -> float:
+         """Estimate cost for Qwen image edit generation.
+
+         Based on typical Fal image editing model pricing.
+         Using $0.05 per image as a reasonable estimate.
+         """
+         per_image_cost = 0.05
+         return per_image_cost * inputs.num_images
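As a quick illustration of the new input schema (a sketch, not part of the diff): the ImageSize bounds above are enforced by pydantic, so out-of-range dimensions fail before any fal.ai call. The snippet below re-declares a stand-in copy of the model so it runs with only pydantic installed:

    from pydantic import BaseModel, Field, ValidationError

    # Hypothetical stand-in mirroring ImageSize from the diff above,
    # so this runs without the boards package installed.
    class ImageSize(BaseModel):
        width: int = Field(default=512, ge=1, le=14142)
        height: int = Field(default=512, ge=1, le=14142)

    ImageSize(width=768, height=768)       # OK: within the 1-14142 bounds
    try:
        ImageSize(width=0, height=768)     # rejected: width must be >= 1
    except ValidationError as err:
        print(err.errors()[0]["msg"])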
package/templates/api/src/boards/generators/implementations/fal/video/__init__.py
@@ -1,17 +1,59 @@
  """Fal.ai video generators."""

+ from .bytedance_seedance_v1_pro_text_to_video import (
+     FalBytedanceSeedanceV1ProTextToVideoGenerator,
+ )
+ from .creatify_lipsync import FalCreatifyLipsyncGenerator
+ from .fal_bytedance_seedance_v1_pro_image_to_video import (
+     FalBytedanceSeedanceV1ProImageToVideoGenerator,
+ )
+ from .fal_minimax_hailuo_02_standard_text_to_video import (
+     FalMinimaxHailuo02StandardTextToVideoGenerator,
+ )
+ from .fal_pixverse_lipsync import FalPixverseLipsyncGenerator
+ from .fal_sora_2_text_to_video import FalSora2TextToVideoGenerator
+ from .infinitalk import FalInfinitalkGenerator
+ from .kling_video_v2_5_turbo_pro_image_to_video import (
+     FalKlingVideoV25TurboProImageToVideoGenerator,
+ )
  from .kling_video_v2_5_turbo_pro_text_to_video import (
      FalKlingVideoV25TurboProTextToVideoGenerator,
  )
+ from .minimax_hailuo_2_3_pro_image_to_video import (
+     FalMinimaxHailuo23ProImageToVideoGenerator,
+ )
+ from .sora2_image_to_video import FalSora2ImageToVideoGenerator
+ from .sora_2_image_to_video_pro import FalSora2ImageToVideoProGenerator
+ from .sora_2_text_to_video_pro import FalSora2TextToVideoProGenerator
  from .sync_lipsync_v2 import FalSyncLipsyncV2Generator
+ from .sync_lipsync_v2_pro import FalSyncLipsyncV2ProGenerator
+ from .veed_lipsync import FalVeedLipsyncGenerator
+ from .veo3 import FalVeo3Generator
  from .veo31_first_last_frame_to_video import FalVeo31FirstLastFrameToVideoGenerator
  from .veo31_image_to_video import FalVeo31ImageToVideoGenerator
  from .veo31_reference_to_video import FalVeo31ReferenceToVideoGenerator
+ from .wan_pro_image_to_video import FalWanProImageToVideoGenerator

  __all__ = [
+     "FalInfinitalkGenerator",
+     "FalCreatifyLipsyncGenerator",
+     "FalBytedanceSeedanceV1ProImageToVideoGenerator",
+     "FalBytedanceSeedanceV1ProTextToVideoGenerator",
+     "FalKlingVideoV25TurboProImageToVideoGenerator",
      "FalKlingVideoV25TurboProTextToVideoGenerator",
+     "FalPixverseLipsyncGenerator",
+     "FalSora2TextToVideoProGenerator",
+     "FalSora2TextToVideoGenerator",
+     "FalMinimaxHailuo02StandardTextToVideoGenerator",
+     "FalMinimaxHailuo23ProImageToVideoGenerator",
+     "FalSora2ImageToVideoGenerator",
+     "FalSora2ImageToVideoProGenerator",
      "FalSyncLipsyncV2Generator",
+     "FalVeedLipsyncGenerator",
+     "FalSyncLipsyncV2ProGenerator",
+     "FalVeo3Generator",
      "FalVeo31FirstLastFrameToVideoGenerator",
      "FalVeo31ImageToVideoGenerator",
      "FalVeo31ReferenceToVideoGenerator",
+     "FalWanProImageToVideoGenerator",
  ]
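With the expanded __all__, downstream code can enumerate every fal video generator generically. A minimal sketch, assuming the template's src/boards layout is importable under the package root boards:

    import boards.generators.implementations.fal.video as fal_video

    # Print the registered generator name for each exported class; each
    # generator sets a `name` class attribute, as seen in the hunks below.
    for export in fal_video.__all__:
        cls = getattr(fal_video, export)
        print(export, "->", getattr(cls, "name", "<no name>"))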
package/templates/api/src/boards/generators/implementations/fal/video/bytedance_seedance_v1_pro_text_to_video.py
@@ -0,0 +1,209 @@
+ """
+ Bytedance Seedance 1.0 Pro text-to-video generator.
+
+ A high quality video generation model developed by Bytedance that transforms
+ text prompts into professional-grade videos with customizable parameters.
+
+ Based on Fal AI's fal-ai/bytedance/seedance/v1/pro/text-to-video model.
+ See: https://fal.ai/models/fal-ai/bytedance/seedance/v1/pro/text-to-video
+ """
+
+ import os
+ from typing import Literal
+
+ from pydantic import BaseModel, Field
+
+ from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+
+
+ class BytedanceSeedanceV1ProTextToVideoInput(BaseModel):
+     """Input schema for Bytedance Seedance 1.0 Pro text-to-video generation."""
+
+     prompt: str = Field(
+         description="Text description of the desired video content",
+     )
+     aspect_ratio: Literal["21:9", "16:9", "4:3", "1:1", "3:4", "9:16"] = Field(
+         default="16:9",
+         description="Video aspect ratio",
+     )
+     resolution: Literal["480p", "720p", "1080p"] = Field(
+         default="1080p",
+         description="Video resolution quality",
+     )
+     duration: Literal["2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12"] = Field(
+         default="5",
+         description="Video length in seconds (2-12)",
+     )
+     enable_safety_checker: bool = Field(
+         default=True,
+         description="Enable safety checker to filter unsafe content",
+     )
+     camera_fixed: bool = Field(
+         default=False,
+         description="Whether to fix camera position during generation",
+     )
+     seed: int | None = Field(
+         default=None,
+         description="Random seed for reproducibility; use -1 for randomization",
+     )
+
+
+ class FalBytedanceSeedanceV1ProTextToVideoGenerator(BaseGenerator):
+     """Generator for text-to-video using Bytedance Seedance 1.0 Pro."""
+
+     name = "fal-bytedance-seedance-v1-pro-text-to-video"
+     description = "Fal: Bytedance Seedance 1.0 Pro - high quality text-to-video generation"
+     artifact_type = "video"
+
+     def get_input_schema(self) -> type[BytedanceSeedanceV1ProTextToVideoInput]:
+         """Return the input schema for this generator."""
+         return BytedanceSeedanceV1ProTextToVideoInput
+
+     async def generate(
+         self, inputs: BytedanceSeedanceV1ProTextToVideoInput, context: GeneratorExecutionContext
+     ) -> GeneratorResult:
+         """Generate video using fal.ai Bytedance Seedance 1.0 Pro model."""
+         # Check for API key
+         if not os.getenv("FAL_KEY"):
+             raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
+
+         # Import fal_client
+         try:
+             import fal_client
+         except ImportError as e:
+             raise ImportError(
+                 "fal.ai SDK is required for FalBytedanceSeedanceV1ProTextToVideoGenerator. "
+                 "Install with: pip install weirdfingers-boards[generators-fal]"
+             ) from e
+
+         # Prepare arguments for fal.ai API
+         arguments = {
+             "prompt": inputs.prompt,
+             "aspect_ratio": inputs.aspect_ratio,
+             "resolution": inputs.resolution,
+             "duration": inputs.duration,
+             "enable_safety_checker": inputs.enable_safety_checker,
+             "camera_fixed": inputs.camera_fixed,
+         }
+
+         # Add seed if provided
+         if inputs.seed is not None:
+             arguments["seed"] = inputs.seed
+
+         # Submit async job
+         handler = await fal_client.submit_async(
+             "fal-ai/bytedance/seedance/v1/pro/text-to-video",
+             arguments=arguments,
+         )
+
+         # Store external job ID
+         await context.set_external_job_id(handler.request_id)
+
+         # Stream progress updates
+         from .....progress.models import ProgressUpdate
+
+         event_count = 0
+         async for event in handler.iter_events(with_logs=True):
+             event_count += 1
+             # Sample every 3rd event to avoid spam
+             if event_count % 3 == 0:
+                 # Extract logs if available
+                 logs = getattr(event, "logs", None)
+                 if logs:
+                     # Join log entries into a single message
+                     if isinstance(logs, list):
+                         message = " | ".join(str(log) for log in logs if log)
+                     else:
+                         message = str(logs)
+
+                     if message:
+                         await context.publish_progress(
+                             ProgressUpdate(
+                                 job_id=handler.request_id,
+                                 status="processing",
+                                 progress=50.0,  # Approximate mid-point progress
+                                 phase="processing",
+                                 message=message,
+                             )
+                         )
+
+         # Get final result
+         result = await handler.get()
+
+         # Extract video from result
+         # fal.ai returns: {"video": {"url": "...", "content_type": "video/mp4", ...}, "seed": 123}
+         video_data = result.get("video")
+         if not video_data:
+             raise ValueError("No video returned from fal.ai API")
+
+         video_url = video_data.get("url")
+         if not video_url:
+             raise ValueError("Video missing URL in fal.ai response")
+
+         # Calculate video dimensions based on aspect ratio and resolution
+         width, height = self._calculate_dimensions(inputs.aspect_ratio, inputs.resolution)
+
+         # Store video result
+         artifact = await context.store_video_result(
+             storage_url=video_url,
+             format="mp4",
+             width=width,
+             height=height,
+             duration=float(inputs.duration),
+             output_index=0,
+         )
+
+         return GeneratorResult(outputs=[artifact])
+
+     def _calculate_dimensions(self, aspect_ratio: str, resolution: str) -> tuple[int, int]:
+         """Calculate video dimensions based on aspect ratio and resolution.
+
+         Args:
+             aspect_ratio: Video aspect ratio (e.g., "16:9", "21:9")
+             resolution: Video resolution (e.g., "1080p", "720p", "480p")
+
+         Returns:
+             Tuple of (width, height) in pixels
+         """
+         # Base heights for each resolution
+         resolution_heights = {
+             "1080p": 1080,
+             "720p": 720,
+             "480p": 480,
+         }
+
+         # Parse aspect ratio
+         aspect_parts = aspect_ratio.split(":")
+         aspect_width = int(aspect_parts[0])
+         aspect_height = int(aspect_parts[1])
+
+         # Get base height for resolution
+         height = resolution_heights[resolution]
+
+         # Calculate width based on aspect ratio
+         width = int((height * aspect_width) / aspect_height)
+
+         return width, height
+
+     async def estimate_cost(self, inputs: BytedanceSeedanceV1ProTextToVideoInput) -> float:
+         """Estimate cost for Bytedance Seedance 1.0 Pro generation.
+
+         Pricing information not provided in official documentation.
+         Estimated at $0.12 per video based on typical video generation costs.
+         Cost may vary based on duration and resolution settings.
+         """
+         # Base cost per video
+         base_cost = 0.12
+
+         # Adjust for longer durations (higher cost for longer videos)
+         duration_seconds = int(inputs.duration)
+         duration_multiplier = 1.0 + ((duration_seconds - 5) * 0.05)  # +5% per second above 5s
+
+         # Adjust for higher resolutions
+         resolution_multiplier = {
+             "480p": 0.8,   # Lower quality, lower cost
+             "720p": 1.0,   # Standard
+             "1080p": 1.3,  # Higher quality, higher cost
+         }[inputs.resolution]
+
+         return base_cost * duration_multiplier * resolution_multiplier
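The dimension and cost helpers are plain arithmetic, so their behavior can be spot-checked without calling fal.ai. A standalone mirror of the formulas above (a sketch, not the shipped code):

    def dims(aspect_ratio: str, resolution: str) -> tuple[int, int]:
        # Same rule as _calculate_dimensions: fix the height per resolution,
        # then derive the width from the aspect ratio.
        heights = {"1080p": 1080, "720p": 720, "480p": 480}
        aw, ah = (int(p) for p in aspect_ratio.split(":"))
        h = heights[resolution]
        return int(h * aw / ah), h

    assert dims("16:9", "1080p") == (1920, 1080)
    assert dims("21:9", "720p") == (1680, 720)

    # Cost example: a 10 s video at 1080p under the multipliers above:
    # 0.12 * (1 + (10 - 5) * 0.05) * 1.3 = 0.12 * 1.25 * 1.3 = 0.195 USD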
package/templates/api/src/boards/generators/implementations/fal/video/creatify_lipsync.py
@@ -0,0 +1,161 @@
+ """
+ fal.ai creatify/lipsync video generator.
+
+ Generates realistic lip-synchronization videos from audio and video inputs
+ using Creatify's lipsync model on fal.ai. Optimized for speed, quality, and
+ consistency.
+
+ Based on Fal AI's creatify/lipsync model.
+ See: https://fal.ai/models/creatify/lipsync
+ """
+
+ import os
+
+ from pydantic import BaseModel, Field
+
+ from ....artifacts import AudioArtifact, VideoArtifact
+ from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+
+
+ class CreatifyLipsyncInput(BaseModel):
+     """Input schema for creatify/lipsync.
+
+     Artifact fields are automatically detected via type introspection
+     and resolved from generation IDs to artifact objects.
+     """
+
+     video: VideoArtifact = Field(description="The video to use for lipsync")
+     audio: AudioArtifact = Field(description="The audio to use for lipsync")
+     loop: bool = Field(
+         default=True,
+         description="Repeats video if shorter than audio",
+     )
+
+
+ class FalCreatifyLipsyncGenerator(BaseGenerator):
+     """Generator for realistic lip-synchronization videos."""
+
+     name = "fal-creatify-lipsync"
+     description = "Fal: Creatify Lipsync - Realistic lipsync video optimized for speed and quality"
+     artifact_type = "video"
+
+     def get_input_schema(self) -> type[CreatifyLipsyncInput]:
+         """Return the input schema for this generator."""
+         return CreatifyLipsyncInput
+
+     async def generate(
+         self, inputs: CreatifyLipsyncInput, context: GeneratorExecutionContext
+     ) -> GeneratorResult:
+         """Generate lip-synced video using creatify/lipsync."""
+         # Check for API key
+         if not os.getenv("FAL_KEY"):
+             raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
+
+         # Import fal_client
+         try:
+             import fal_client
+         except ImportError as e:
+             raise ImportError(
+                 "fal.ai SDK is required for FalCreatifyLipsyncGenerator. "
+                 "Install with: pip install weirdfingers-boards[generators-fal]"
+             ) from e
+
+         # Upload video and audio artifacts to Fal's public storage
+         # Fal API requires publicly accessible URLs
+         from ..utils import upload_artifacts_to_fal
+
+         # Upload video and audio separately
+         video_urls = await upload_artifacts_to_fal([inputs.video], context)
+         audio_urls = await upload_artifacts_to_fal([inputs.audio], context)
+
+         # Prepare arguments for fal.ai API
+         arguments = {
+             "video_url": video_urls[0],
+             "audio_url": audio_urls[0],
+             "loop": inputs.loop,
+         }
+
+         # Submit async job
+         handler = await fal_client.submit_async(
+             "creatify/lipsync",
+             arguments=arguments,
+         )
+
+         # Store external job ID
+         await context.set_external_job_id(handler.request_id)
+
+         # Stream progress updates
+         from .....progress.models import ProgressUpdate
+
+         event_count = 0
+         async for event in handler.iter_events(with_logs=True):
+             event_count += 1
+             # Sample every 3rd event to avoid spam
+             if event_count % 3 == 0:
+                 # Extract logs if available
+                 logs = getattr(event, "logs", None)
+                 if logs:
+                     # Join log entries into a single message
+                     if isinstance(logs, list):
+                         message = " | ".join(str(log) for log in logs if log)
+                     else:
+                         message = str(logs)
+
+                     if message:
+                         await context.publish_progress(
+                             ProgressUpdate(
+                                 job_id=handler.request_id,
+                                 status="processing",
+                                 progress=50.0,  # Approximate mid-point progress
+                                 phase="processing",
+                                 message=message,
+                             )
+                         )
+
+         # Get final result
+         result = await handler.get()
+
+         # Extract video from result
+         # fal.ai returns: {"video": {"url": "...", "content_type": "video/mp4", ...}}
+         video_data = result.get("video")
+
+         if not video_data:
+             raise ValueError("No video returned from fal.ai API")
+
+         video_url = video_data.get("url")
+         if not video_url:
+             raise ValueError("Video missing URL in fal.ai response")
+
+         # Extract format from content_type (e.g., "video/mp4" -> "mp4")
+         # Creatify lipsync always produces MP4 videos, so default to mp4
+         content_type = video_data.get("content_type", "video/mp4")
+         if content_type.startswith("video/"):
+             video_format = content_type.split("/")[-1]
+         else:
+             # If content_type is not a video mime type (e.g., application/octet-stream),
+             # default to mp4 since creatify/lipsync only produces mp4 videos
+             video_format = "mp4"
+
+         # Store the video result
+         # Note: The API doesn't return width/height/duration/fps, so we use defaults
+         # The actual dimensions will be the same as the input video
+         artifact = await context.store_video_result(
+             storage_url=video_url,
+             format=video_format,
+             width=inputs.video.width,
+             height=inputs.video.height,
+             duration=inputs.audio.duration,
+             fps=inputs.video.fps,
+             output_index=0,
+         )
+
+         return GeneratorResult(outputs=[artifact])
+
+     async def estimate_cost(self, inputs: CreatifyLipsyncInput) -> float:
+         """Estimate cost for creatify/lipsync generation in USD.
+
+         Pricing not specified in documentation, using estimate based on
+         typical video processing costs.
+         """
+         # Base cost estimate per generation
+         return 0.05
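The content-type fallback above is easy to verify in isolation. A hypothetical mirror of that branch:

    def video_format(content_type: str) -> str:
        # Mirrors the diff's rule: trust video/* mime types, otherwise
        # default to mp4 since creatify/lipsync only emits MP4.
        if content_type.startswith("video/"):
            return content_type.split("/")[-1]
        return "mp4"

    assert video_format("video/mp4") == "mp4"
    assert video_format("application/octet-stream") == "mp4"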