@weirdfingers/baseboards 0.5.2 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76) hide show
  1. package/README.md +4 -1
  2. package/dist/index.js +131 -11
  3. package/dist/index.js.map +1 -1
  4. package/package.json +1 -1
  5. package/templates/api/alembic/env.py +9 -1
  6. package/templates/api/alembic/versions/20250101_000000_initial_schema.py +107 -49
  7. package/templates/api/alembic/versions/20251022_174729_remove_provider_name_from_generations.py +7 -3
  8. package/templates/api/alembic/versions/20251023_165852_switch_to_declarative_base_and_mapping.py +57 -1
  9. package/templates/api/alembic/versions/20251202_000000_add_artifact_lineage.py +134 -0
  10. package/templates/api/alembic/versions/2025925_62735_add_seed_data_for_default_tenant.py +8 -5
  11. package/templates/api/config/generators.yaml +111 -0
  12. package/templates/api/src/boards/__init__.py +1 -1
  13. package/templates/api/src/boards/api/app.py +2 -1
  14. package/templates/api/src/boards/api/endpoints/tenant_registration.py +1 -1
  15. package/templates/api/src/boards/api/endpoints/uploads.py +150 -0
  16. package/templates/api/src/boards/auth/factory.py +1 -1
  17. package/templates/api/src/boards/dbmodels/__init__.py +8 -22
  18. package/templates/api/src/boards/generators/artifact_resolution.py +45 -12
  19. package/templates/api/src/boards/generators/implementations/fal/audio/__init__.py +16 -1
  20. package/templates/api/src/boards/generators/implementations/fal/audio/beatoven_music_generation.py +171 -0
  21. package/templates/api/src/boards/generators/implementations/fal/audio/beatoven_sound_effect_generation.py +167 -0
  22. package/templates/api/src/boards/generators/implementations/fal/audio/elevenlabs_sound_effects_v2.py +194 -0
  23. package/templates/api/src/boards/generators/implementations/fal/audio/elevenlabs_tts_eleven_v3.py +209 -0
  24. package/templates/api/src/boards/generators/implementations/fal/audio/fal_elevenlabs_tts_turbo_v2_5.py +206 -0
  25. package/templates/api/src/boards/generators/implementations/fal/audio/fal_minimax_speech_26_hd.py +237 -0
  26. package/templates/api/src/boards/generators/implementations/fal/audio/minimax_speech_2_6_turbo.py +1 -1
  27. package/templates/api/src/boards/generators/implementations/fal/image/__init__.py +30 -0
  28. package/templates/api/src/boards/generators/implementations/fal/image/clarity_upscaler.py +220 -0
  29. package/templates/api/src/boards/generators/implementations/fal/image/crystal_upscaler.py +173 -0
  30. package/templates/api/src/boards/generators/implementations/fal/image/fal_ideogram_character.py +227 -0
  31. package/templates/api/src/boards/generators/implementations/fal/image/flux_2.py +203 -0
  32. package/templates/api/src/boards/generators/implementations/fal/image/flux_2_edit.py +230 -0
  33. package/templates/api/src/boards/generators/implementations/fal/image/flux_2_pro.py +204 -0
  34. package/templates/api/src/boards/generators/implementations/fal/image/flux_2_pro_edit.py +221 -0
  35. package/templates/api/src/boards/generators/implementations/fal/image/gemini_25_flash_image.py +177 -0
  36. package/templates/api/src/boards/generators/implementations/fal/image/gpt_image_1_edit_image.py +182 -0
  37. package/templates/api/src/boards/generators/implementations/fal/image/gpt_image_1_mini.py +167 -0
  38. package/templates/api/src/boards/generators/implementations/fal/image/ideogram_character_edit.py +299 -0
  39. package/templates/api/src/boards/generators/implementations/fal/image/ideogram_v2.py +190 -0
  40. package/templates/api/src/boards/generators/implementations/fal/image/nano_banana_pro_edit.py +226 -0
  41. package/templates/api/src/boards/generators/implementations/fal/image/qwen_image.py +249 -0
  42. package/templates/api/src/boards/generators/implementations/fal/image/qwen_image_edit.py +244 -0
  43. package/templates/api/src/boards/generators/implementations/fal/video/__init__.py +42 -0
  44. package/templates/api/src/boards/generators/implementations/fal/video/bytedance_seedance_v1_pro_text_to_video.py +209 -0
  45. package/templates/api/src/boards/generators/implementations/fal/video/creatify_lipsync.py +161 -0
  46. package/templates/api/src/boards/generators/implementations/fal/video/fal_bytedance_seedance_v1_pro_image_to_video.py +222 -0
  47. package/templates/api/src/boards/generators/implementations/fal/video/fal_minimax_hailuo_02_standard_text_to_video.py +152 -0
  48. package/templates/api/src/boards/generators/implementations/fal/video/fal_pixverse_lipsync.py +197 -0
  49. package/templates/api/src/boards/generators/implementations/fal/video/fal_sora_2_text_to_video.py +173 -0
  50. package/templates/api/src/boards/generators/implementations/fal/video/infinitalk.py +221 -0
  51. package/templates/api/src/boards/generators/implementations/fal/video/kling_video_v2_5_turbo_pro_image_to_video.py +175 -0
  52. package/templates/api/src/boards/generators/implementations/fal/video/minimax_hailuo_2_3_pro_image_to_video.py +153 -0
  53. package/templates/api/src/boards/generators/implementations/fal/video/sora2_image_to_video.py +172 -0
  54. package/templates/api/src/boards/generators/implementations/fal/video/sora_2_image_to_video_pro.py +175 -0
  55. package/templates/api/src/boards/generators/implementations/fal/video/sora_2_text_to_video_pro.py +163 -0
  56. package/templates/api/src/boards/generators/implementations/fal/video/sync_lipsync_v2_pro.py +155 -0
  57. package/templates/api/src/boards/generators/implementations/fal/video/veed_lipsync.py +174 -0
  58. package/templates/api/src/boards/generators/implementations/fal/video/veo3.py +194 -0
  59. package/templates/api/src/boards/generators/implementations/fal/video/veo31_first_last_frame_to_video.py +1 -1
  60. package/templates/api/src/boards/generators/implementations/fal/video/wan_pro_image_to_video.py +158 -0
  61. package/templates/api/src/boards/graphql/access_control.py +1 -1
  62. package/templates/api/src/boards/graphql/mutations/root.py +16 -4
  63. package/templates/api/src/boards/graphql/resolvers/board.py +0 -2
  64. package/templates/api/src/boards/graphql/resolvers/generation.py +10 -233
  65. package/templates/api/src/boards/graphql/resolvers/lineage.py +381 -0
  66. package/templates/api/src/boards/graphql/resolvers/upload.py +463 -0
  67. package/templates/api/src/boards/graphql/types/generation.py +62 -26
  68. package/templates/api/src/boards/middleware.py +1 -1
  69. package/templates/api/src/boards/storage/factory.py +2 -2
  70. package/templates/api/src/boards/tenant_isolation.py +9 -9
  71. package/templates/api/src/boards/workers/actors.py +10 -1
  72. package/templates/web/package.json +1 -1
  73. package/templates/web/src/app/boards/[boardId]/page.tsx +14 -5
  74. package/templates/web/src/app/lineage/[generationId]/page.tsx +233 -0
  75. package/templates/web/src/components/boards/ArtifactPreview.tsx +20 -1
  76. package/templates/web/src/components/boards/UploadArtifact.tsx +253 -0
@@ -0,0 +1,153 @@
1
+ """
2
+ MiniMax Hailuo 2.3 Pro image-to-video generator.
3
+
4
+ Advanced image-to-video generation model with 1080p resolution. Transforms static
5
+ images into dynamic videos using text prompts to guide the creative output.
6
+
7
+ Based on Fal AI's fal-ai/minimax/hailuo-2.3/pro/image-to-video model.
8
+ See: https://fal.ai/models/fal-ai/minimax/hailuo-2.3/pro/image-to-video
9
+ """
10
+
11
+ import os
12
+
13
+ from pydantic import BaseModel, Field
14
+
15
+ from ....artifacts import ImageArtifact
16
+ from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
17
+
18
+
19
class MinimaxHailuo23ProImageToVideoInput(BaseModel):
    """Validated inputs for MiniMax Hailuo 2.3 Pro image-to-video generation.

    The ``image_url`` artifact field is detected via type introspection and
    resolved from a generation ID into an ``ImageArtifact`` object before the
    generator runs.
    """

    # Text guidance for the generated motion/content (1-2000 characters).
    prompt: str = Field(
        description="Text prompt for video generation",
        min_length=1,
        max_length=2000,
    )
    # Source image used as the first frame of the output video.
    image_url: ImageArtifact = Field(description="URL of the image to use as the first frame")
    # Whether the model may rewrite the prompt for better results (on by default).
    prompt_optimizer: bool = Field(
        default=True,
        description="Whether to use the model's prompt optimizer",
    )
36
+
37
+
38
class FalMinimaxHailuo23ProImageToVideoGenerator(BaseGenerator):
    """Generator for creating videos from images using MiniMax Hailuo 2.3 Pro."""

    name = "fal-minimax-hailuo-2-3-pro-image-to-video"
    description = "Fal: MiniMax Hailuo 2.3 Pro - Image-to-video with 1080p resolution"
    artifact_type = "video"

    def get_input_schema(self) -> type[MinimaxHailuo23ProImageToVideoInput]:
        """Return the input schema for this generator."""
        return MinimaxHailuo23ProImageToVideoInput

    async def generate(
        self, inputs: MinimaxHailuo23ProImageToVideoInput, context: GeneratorExecutionContext
    ) -> GeneratorResult:
        """Generate video using fal.ai minimax/hailuo-2.3/pro/image-to-video.

        Args:
            inputs: Validated prompt, first-frame image, and optimizer flag.
            context: Execution context for artifact upload, progress publishing,
                and result storage.

        Returns:
            GeneratorResult containing a single stored video artifact.

        Raises:
            ValueError: If FAL_KEY is unset or the API response has no video URL.
            ImportError: If the optional fal.ai SDK is not installed.
        """
        # Fail fast when credentials are missing rather than erroring mid-job.
        if not os.getenv("FAL_KEY"):
            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")

        # The fal SDK is an optional extra; import lazily with an install hint.
        try:
            import fal_client
        except ImportError as e:
            raise ImportError(
                "fal.ai SDK is required for FalMinimaxHailuo23ProImageToVideoGenerator. "
                "Install with: pip install weirdfingers-boards[generators-fal]"
            ) from e

        # Upload image artifact to Fal's public storage.
        # Fal API requires publicly accessible URLs, but our storage_url might be:
        # - Localhost URLs (not publicly accessible)
        # - Private S3 buckets (not publicly accessible)
        # So we upload to Fal's temporary storage first.
        from ..utils import upload_artifacts_to_fal

        image_urls = await upload_artifacts_to_fal([inputs.image_url], context)

        # Prepare arguments for fal.ai API.
        arguments = {
            "prompt": inputs.prompt,
            "image_url": image_urls[0],
            "prompt_optimizer": inputs.prompt_optimizer,
        }

        # Submit async job.
        handler = await fal_client.submit_async(
            "fal-ai/minimax/hailuo-2.3/pro/image-to-video",
            arguments=arguments,
        )

        # Store external job ID so the job can be tracked/cancelled later.
        await context.set_external_job_id(handler.request_id)

        # Stream progress updates.
        from .....progress.models import ProgressUpdate

        event_count = 0
        async for event in handler.iter_events(with_logs=True):
            event_count += 1
            # Sample every 3rd event to avoid spamming subscribers.
            if event_count % 3 == 0:
                logs = getattr(event, "logs", None)
                if logs:
                    # Join log entries into a single message.
                    if isinstance(logs, list):
                        message = " | ".join(str(log) for log in logs if log)
                    else:
                        message = str(logs)

                    if message:
                        await context.publish_progress(
                            ProgressUpdate(
                                job_id=handler.request_id,
                                status="processing",
                                progress=50.0,  # approximate; Fal gives no percentage
                                phase="processing",
                                message=message,
                            )
                        )

        # Get final result.
        result = await handler.get()

        # Extract video from result.
        # Expected structure: {"video": {"url": "...", "content_type": "...", ...}}
        video_data = result.get("video")
        if not video_data:
            raise ValueError("No video returned from fal.ai API")

        video_url = video_data.get("url")
        if not video_url:
            raise ValueError("Video missing URL in fal.ai response")

        # Prefer metadata reported by the API (consistent with the sibling Sora
        # generators in this package); the response may omit dimensions/duration,
        # in which case fall back to the model's documented 1080p output.
        width = video_data.get("width", 1920)
        height = video_data.get("height", 1080)
        duration = video_data.get("duration")  # stored as None when absent

        # Store video result.
        artifact = await context.store_video_result(
            storage_url=video_url,
            format="mp4",
            width=width,
            height=height,
            duration=duration,
            output_index=0,
        )

        return GeneratorResult(outputs=[artifact])

    async def estimate_cost(self, inputs: MinimaxHailuo23ProImageToVideoInput) -> float:
        """Estimate cost for this generation in USD.

        Note: Pricing information not available in Fal documentation.
        Using placeholder value that should be updated with actual pricing.
        """
        # TODO: Update with actual pricing from Fal when available
        return 0.12  # Placeholder estimate for 1080p video generation
@@ -0,0 +1,172 @@
1
+ """
2
+ Sora 2 image-to-video generator.
3
+
4
+ OpenAI's state-of-the-art video generation model that creates richly detailed,
5
+ dynamic clips with audio from natural language prompts and images.
6
+
7
+ Based on Fal AI's fal-ai/sora-2/image-to-video model.
8
+ See: https://fal.ai/models/fal-ai/sora-2/image-to-video
9
+ """
10
+
11
+ import os
12
+ from typing import Literal
13
+
14
+ from pydantic import BaseModel, Field
15
+
16
+ from ....artifacts import ImageArtifact
17
+ from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
18
+
19
+
20
class Sora2ImageToVideoInput(BaseModel):
    """Validated inputs for Sora 2 image-to-video generation.

    The ``image_url`` artifact field is detected via type introspection and
    resolved from a generation ID into an ``ImageArtifact`` object before use.
    """

    # Natural-language description of the desired clip (1-5000 characters).
    prompt: str = Field(
        description="The text prompt describing the video you want to generate",
        min_length=1,
        max_length=5000,
    )
    # Source image that becomes the first frame of the output video.
    image_url: ImageArtifact = Field(description="The image to use as the first frame")
    # Output resolution option.
    resolution: Literal["auto", "720p"] = Field(
        default="auto",
        description="Resolution of the generated video",
    )
    # Output aspect ratio option.
    aspect_ratio: Literal["auto", "9:16", "16:9"] = Field(
        default="auto",
        description="Aspect ratio of the generated video",
    )
    # Clip length in seconds; only 4, 8, or 12 are accepted.
    duration: Literal[4, 8, 12] = Field(
        default=4,
        description="Duration of the generated video in seconds",
    )
45
+
46
+
47
class FalSora2ImageToVideoGenerator(BaseGenerator):
    """Generator for creating videos from images using OpenAI's Sora 2."""

    name = "fal-sora2-image-to-video"
    description = "Fal: Sora 2 - Generate videos from images with audio"
    artifact_type = "video"

    def get_input_schema(self) -> type[Sora2ImageToVideoInput]:
        """Return the pydantic schema describing this generator's inputs."""
        return Sora2ImageToVideoInput

    async def generate(
        self, inputs: Sora2ImageToVideoInput, context: GeneratorExecutionContext
    ) -> GeneratorResult:
        """Run fal.ai's sora-2/image-to-video model and store the resulting clip.

        Raises:
            ValueError: When FAL_KEY is unset or the response lacks a video URL.
            ImportError: When the optional fal.ai SDK extra is not installed.
        """
        # Credentials must be present before any work is attempted.
        if not os.getenv("FAL_KEY"):
            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")

        # Optional dependency: surface a clear install hint if absent.
        try:
            import fal_client
        except ImportError as e:
            raise ImportError(
                "fal.ai SDK is required for FalSora2ImageToVideoGenerator. "
                "Install with: pip install weirdfingers-boards[generators-fal]"
            ) from e

        # Fal needs a publicly reachable image URL; our stored URLs may point at
        # localhost or a private bucket, so re-upload to Fal's temp storage.
        from ..utils import upload_artifacts_to_fal

        uploaded = await upload_artifacts_to_fal([inputs.image_url], context)

        payload = {
            "prompt": inputs.prompt,
            "image_url": uploaded[0],
            "resolution": inputs.resolution,
            "aspect_ratio": inputs.aspect_ratio,
            "duration": inputs.duration,
        }

        # Queue the generation job and record its provider-side ID.
        job = await fal_client.submit_async(
            "fal-ai/sora-2/image-to-video",
            arguments=payload,
        )
        await context.set_external_job_id(job.request_id)

        # Relay a sampled subset of the provider's log events as progress updates.
        from .....progress.models import ProgressUpdate

        seen = 0
        async for event in job.iter_events(with_logs=True):
            seen += 1
            # Only every 3rd event, to avoid spamming subscribers.
            if seen % 3 != 0:
                continue
            logs = getattr(event, "logs", None)
            if not logs:
                continue
            if isinstance(logs, list):
                text = " | ".join(str(entry) for entry in logs if entry)
            else:
                text = str(logs)
            if not text:
                continue
            await context.publish_progress(
                ProgressUpdate(
                    job_id=job.request_id,
                    status="processing",
                    progress=50.0,  # approximate; Fal exposes no real percentage
                    phase="processing",
                    message=text,
                )
            )

        # Block until the job finishes. Response shape:
        # {"video": {"url": "...", "width": 1280, "height": 720, ...}}
        result = await job.get()
        video_info = result.get("video")
        if not video_info:
            raise ValueError("No video returned from fal.ai API")
        url = video_info.get("url")
        if not url:
            raise ValueError("Video missing URL in fal.ai response")

        # Persist the clip, preferring metadata from the response and falling
        # back to 720p/30fps defaults when fields are absent.
        artifact = await context.store_video_result(
            storage_url=url,
            format="mp4",
            width=video_info.get("width", 1280),
            height=video_info.get("height", 720),
            duration=video_info.get("duration", inputs.duration),
            fps=video_info.get("fps", 30),
            output_index=0,
        )
        return GeneratorResult(outputs=[artifact])

    async def estimate_cost(self, inputs: Sora2ImageToVideoInput) -> float:
        """Return a rough USD cost estimate, scaled linearly with duration.

        Fal does not publish pricing for this model; the base figure is a
        placeholder pending real numbers.
        """
        # TODO: Update with actual pricing from Fal when available
        base_cost = 0.20  # Placeholder estimate for 4s
        duration_multiplier = inputs.duration / 4.0
        return base_cost * duration_multiplier
@@ -0,0 +1,175 @@
1
+ """
2
+ OpenAI Sora 2 Image-to-Video Pro generator.
3
+
4
+ Image-to-video endpoint for Sora 2 Pro, OpenAI's state-of-the-art video model
5
+ capable of creating richly detailed, dynamic clips with audio from natural
6
+ language prompts and images.
7
+
8
+ Based on Fal AI's fal-ai/sora-2/image-to-video/pro model.
9
+ See: https://fal.ai/models/fal-ai/sora-2/image-to-video/pro
10
+ """
11
+
12
+ import os
13
+ from typing import Literal
14
+
15
+ from pydantic import BaseModel, Field
16
+
17
+ from ....artifacts import ImageArtifact
18
+ from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
19
+
20
+
21
class Sora2ImageToVideoProInput(BaseModel):
    """Validated inputs for Sora 2 Image-to-Video Pro generation.

    The ``image_url`` artifact field is detected via type introspection and
    resolved from a generation ID into an ``ImageArtifact`` object before use.
    """

    # Natural-language description of the desired clip (1-5000 characters).
    prompt: str = Field(
        description="The text prompt describing the video you want to generate",
        min_length=1,
        max_length=5000,
    )
    # Source image that becomes the first frame of the output video.
    image_url: ImageArtifact = Field(description="The image to use as the first frame of the video")
    # Output resolution; "auto" lets the service pick the optimal one.
    resolution: Literal["auto", "720p", "1080p"] = Field(
        default="auto",
        description="Resolution of the generated video. 'auto' selects optimal resolution",
    )
    # Output aspect ratio; "auto" keeps the source image's ratio.
    aspect_ratio: Literal["auto", "9:16", "16:9"] = Field(
        default="auto",
        description="Aspect ratio of the generated video. 'auto' uses the image's aspect ratio",
    )
    # Clip length in seconds; only 4, 8, or 12 are accepted.
    duration: Literal[4, 8, 12] = Field(
        default=4,
        description="Duration of the generated video in seconds",
    )
46
+
47
+
48
class FalSora2ImageToVideoProGenerator(BaseGenerator):
    """Generator for creating videos from images using OpenAI Sora 2 Pro."""

    name = "fal-sora-2-image-to-video-pro"
    description = "Fal: Sora 2 Pro - Create dynamic videos with audio from images and text prompts"
    artifact_type = "video"

    def get_input_schema(self) -> type[Sora2ImageToVideoProInput]:
        """Return the pydantic schema describing this generator's inputs."""
        return Sora2ImageToVideoProInput

    async def generate(
        self, inputs: Sora2ImageToVideoProInput, context: GeneratorExecutionContext
    ) -> GeneratorResult:
        """Produce a video clip from an input image via fal.ai Sora 2 Pro.

        Raises:
            ValueError: When FAL_KEY is missing or the response has no video URL.
            ImportError: When the fal.ai SDK extra is not installed.
        """
        # Credentials must be present before any work is attempted.
        if not os.getenv("FAL_KEY"):
            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")

        # Optional dependency: surface a clear install hint if absent.
        try:
            import fal_client
        except ImportError as e:
            raise ImportError(
                "fal.ai SDK is required for FalSora2ImageToVideoProGenerator. "
                "Install with: pip install weirdfingers-boards[generators-fal]"
            ) from e

        # Our stored artifact URLs may be localhost or private-bucket links,
        # which Fal cannot fetch; push the image to Fal's temp storage instead.
        from ..utils import upload_artifacts_to_fal

        public_urls = await upload_artifacts_to_fal([inputs.image_url], context)
        first_frame_url = public_urls[0]

        request_args = {
            "prompt": inputs.prompt,
            "image_url": first_frame_url,
            "resolution": inputs.resolution,
            "aspect_ratio": inputs.aspect_ratio,
            "duration": inputs.duration,
        }

        model_id = "fal-ai/sora-2/image-to-video/pro"
        job = await fal_client.submit_async(model_id, arguments=request_args)

        # Persist the provider-side job ID so the job can be tracked later.
        await context.set_external_job_id(job.request_id)

        from .....progress.models import ProgressUpdate

        # Forward every third log-bearing event as a coarse progress update.
        n_events = 0
        async for event in job.iter_events(with_logs=True):
            n_events += 1
            if n_events % 3 != 0:
                continue
            raw_logs = getattr(event, "logs", None)
            if not raw_logs:
                continue
            if isinstance(raw_logs, list):
                progress_message = " | ".join(str(item) for item in raw_logs if item)
            else:
                progress_message = str(raw_logs)
            if progress_message:
                await context.publish_progress(
                    ProgressUpdate(
                        job_id=job.request_id,
                        status="processing",
                        progress=50.0,  # no true percentage available from Fal
                        phase="processing",
                        message=progress_message,
                    )
                )

        # Await completion. Response shape:
        # {"video": {"url": "...", "width": ..., "height": ..., ...}}
        response = await job.get()
        video_obj = response.get("video")
        if not video_obj:
            raise ValueError("No video returned from fal.ai API")
        clip_url = video_obj.get("url")
        if not clip_url:
            raise ValueError("Video missing URL in fal.ai response")

        # Store the clip, falling back to 720p/30fps defaults when the response
        # omits metadata fields.
        stored = await context.store_video_result(
            storage_url=clip_url,
            format="mp4",
            width=video_obj.get("width", 1280),
            height=video_obj.get("height", 720),
            duration=video_obj.get("duration", inputs.duration),
            fps=video_obj.get("fps", 30),
            output_index=0,
        )
        return GeneratorResult(outputs=[stored])

    async def estimate_cost(self, inputs: Sora2ImageToVideoProInput) -> float:
        """Rough USD estimate; Fal publishes no pricing for this model yet.

        Cost is assumed to scale linearly with clip duration.
        """
        # TODO: Update with actual pricing from Fal when available
        base_cost = 0.50  # Placeholder estimate for 4s
        duration_multiplier = inputs.duration / 4  # 4s is base
        return base_cost * duration_multiplier
@@ -0,0 +1,163 @@
1
+ """
2
+ Sora 2 Pro text-to-video generator.
3
+
4
+ OpenAI's state-of-the-art video model capable of creating richly detailed,
5
+ dynamic clips with audio from natural language descriptions.
6
+
7
+ Based on Fal AI's fal-ai/sora-2/text-to-video/pro model.
8
+ See: https://fal.ai/models/fal-ai/sora-2/text-to-video/pro
9
+ """
10
+
11
+ import os
12
+ from typing import Literal
13
+
14
+ from pydantic import BaseModel, Field
15
+
16
+ from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
17
+
18
+
19
+ class Sora2TextToVideoProInput(BaseModel):
20
+ """Input schema for Sora 2 Pro text-to-video generation."""
21
+
22
+ prompt: str = Field(
23
+ description="Describes the desired video",
24
+ min_length=1,
25
+ max_length=5000,
26
+ )
27
+ resolution: Literal["720p", "1080p"] = Field(
28
+ default="1080p",
29
+ description="Video resolution",
30
+ )
31
+ aspect_ratio: Literal["9:16", "16:9"] = Field(
32
+ default="16:9",
33
+ description="Video aspect ratio",
34
+ )
35
+ duration: Literal[4, 8, 12] = Field(
36
+ default=4,
37
+ description="Video duration in seconds",
38
+ )
39
+
40
+
41
class FalSora2TextToVideoProGenerator(BaseGenerator):
    """Generator for text-to-video using Sora 2 Pro."""

    name = "fal-sora-2-text-to-video-pro"
    description = "Fal: Sora 2 Pro - OpenAI's state-of-the-art text-to-video model with audio"
    artifact_type = "video"

    def get_input_schema(self) -> type[Sora2TextToVideoProInput]:
        """Return the input schema for this generator."""
        return Sora2TextToVideoProInput

    async def generate(
        self, inputs: Sora2TextToVideoProInput, context: GeneratorExecutionContext
    ) -> GeneratorResult:
        """Generate video using fal.ai Sora 2 Pro model.

        Args:
            inputs: Validated prompt/resolution/aspect-ratio/duration settings.
            context: Execution context for job tracking, progress, and storage.

        Returns:
            GeneratorResult with one stored video artifact.

        Raises:
            ValueError: If FAL_KEY is unset or the response contains no video URL.
            ImportError: If the optional fal.ai SDK is not installed.
        """
        # Fail fast when credentials are missing.
        if not os.getenv("FAL_KEY"):
            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")

        # The fal SDK is an optional extra; import lazily with an install hint.
        try:
            import fal_client
        except ImportError as e:
            raise ImportError(
                "fal.ai SDK is required for FalSora2TextToVideoProGenerator. "
                "Install with: pip install weirdfingers-boards[generators-fal]"
            ) from e

        # Prepare arguments for fal.ai API.
        arguments = {
            "prompt": inputs.prompt,
            "resolution": inputs.resolution,
            "aspect_ratio": inputs.aspect_ratio,
            "duration": inputs.duration,
        }

        # Submit async job.
        handler = await fal_client.submit_async(
            "fal-ai/sora-2/text-to-video/pro",
            arguments=arguments,
        )

        # Store external job ID for tracking/cancellation.
        await context.set_external_job_id(handler.request_id)

        # Stream progress updates.
        from .....progress.models import ProgressUpdate

        event_count = 0
        async for event in handler.iter_events(with_logs=True):
            event_count += 1
            # Sample every 3rd event to avoid spam.
            if event_count % 3 == 0:
                logs = getattr(event, "logs", None)
                if logs:
                    # Join log entries into a single message.
                    if isinstance(logs, list):
                        message = " | ".join(str(log) for log in logs if log)
                    else:
                        message = str(logs)

                    if message:
                        await context.publish_progress(
                            ProgressUpdate(
                                job_id=handler.request_id,
                                status="processing",
                                progress=50.0,  # Approximate mid-point progress
                                phase="processing",
                                message=message,
                            )
                        )

        # Get final result.
        result = await handler.get()

        # Extract video from result.
        # fal.ai returns: {"video": {"url": "...", "width": 1920, "height": 1080, ...}}
        video_data = result.get("video")
        if not video_data:
            raise ValueError("No video returned from fal.ai API")

        video_url = video_data.get("url")
        if not video_url:
            raise ValueError("Video missing URL in fal.ai response")

        # Prefer response metadata; fall back to default 1080p dimensions.
        width = video_data.get("width", 1920)
        height = video_data.get("height", 1080)
        # Default fps to 30 when the response omits it, matching the sibling
        # Sora 2 generators (previously this fell through to None).
        fps = video_data.get("fps", 30)
        duration = video_data.get("duration", float(inputs.duration))

        # Store video result.
        artifact = await context.store_video_result(
            storage_url=video_url,
            format="mp4",
            width=width,
            height=height,
            duration=duration,
            fps=fps,
            output_index=0,
        )

        return GeneratorResult(outputs=[artifact])

    async def estimate_cost(self, inputs: Sora2TextToVideoProInput) -> float:
        """Estimate cost for Sora 2 Pro generation in USD.

        Pricing information is not available in official documentation; this is
        a duration- and resolution-scaled placeholder ($0.20-$0.80 per video).
        Actual costs may vary.
        """
        # Base cost per second of video; 1080p output is weighted higher.
        base_cost_per_second = 0.05
        resolution_multiplier = 1.5 if inputs.resolution == "1080p" else 1.0
        return base_cost_per_second * inputs.duration * resolution_multiplier