@weirdfingers/baseboards 0.5.3 → 0.6.1

This diff reflects changes between publicly released package versions as they appear in their public registries and is provided for informational purposes only.
Files changed (74)
  1. package/README.md +1 -1
  2. package/package.json +1 -1
  3. package/templates/api/alembic/env.py +9 -1
  4. package/templates/api/alembic/versions/20250101_000000_initial_schema.py +107 -49
  5. package/templates/api/alembic/versions/20251022_174729_remove_provider_name_from_generations.py +7 -3
  6. package/templates/api/alembic/versions/20251023_165852_switch_to_declarative_base_and_mapping.py +57 -1
  7. package/templates/api/alembic/versions/20251202_000000_add_artifact_lineage.py +134 -0
  8. package/templates/api/alembic/versions/2025925_62735_add_seed_data_for_default_tenant.py +8 -5
  9. package/templates/api/config/generators.yaml +111 -0
  10. package/templates/api/src/boards/__init__.py +1 -1
  11. package/templates/api/src/boards/api/app.py +2 -1
  12. package/templates/api/src/boards/api/endpoints/tenant_registration.py +1 -1
  13. package/templates/api/src/boards/api/endpoints/uploads.py +150 -0
  14. package/templates/api/src/boards/auth/factory.py +1 -1
  15. package/templates/api/src/boards/dbmodels/__init__.py +8 -22
  16. package/templates/api/src/boards/generators/artifact_resolution.py +45 -12
  17. package/templates/api/src/boards/generators/implementations/fal/audio/__init__.py +16 -1
  18. package/templates/api/src/boards/generators/implementations/fal/audio/beatoven_music_generation.py +171 -0
  19. package/templates/api/src/boards/generators/implementations/fal/audio/beatoven_sound_effect_generation.py +167 -0
  20. package/templates/api/src/boards/generators/implementations/fal/audio/elevenlabs_sound_effects_v2.py +194 -0
  21. package/templates/api/src/boards/generators/implementations/fal/audio/elevenlabs_tts_eleven_v3.py +209 -0
  22. package/templates/api/src/boards/generators/implementations/fal/audio/fal_elevenlabs_tts_turbo_v2_5.py +206 -0
  23. package/templates/api/src/boards/generators/implementations/fal/audio/fal_minimax_speech_26_hd.py +237 -0
  24. package/templates/api/src/boards/generators/implementations/fal/audio/minimax_speech_2_6_turbo.py +1 -1
  25. package/templates/api/src/boards/generators/implementations/fal/image/__init__.py +30 -0
  26. package/templates/api/src/boards/generators/implementations/fal/image/clarity_upscaler.py +220 -0
  27. package/templates/api/src/boards/generators/implementations/fal/image/crystal_upscaler.py +173 -0
  28. package/templates/api/src/boards/generators/implementations/fal/image/fal_ideogram_character.py +227 -0
  29. package/templates/api/src/boards/generators/implementations/fal/image/flux_2.py +203 -0
  30. package/templates/api/src/boards/generators/implementations/fal/image/flux_2_edit.py +230 -0
  31. package/templates/api/src/boards/generators/implementations/fal/image/flux_2_pro.py +204 -0
  32. package/templates/api/src/boards/generators/implementations/fal/image/flux_2_pro_edit.py +221 -0
  33. package/templates/api/src/boards/generators/implementations/fal/image/gemini_25_flash_image.py +177 -0
  34. package/templates/api/src/boards/generators/implementations/fal/image/gpt_image_1_edit_image.py +182 -0
  35. package/templates/api/src/boards/generators/implementations/fal/image/gpt_image_1_mini.py +167 -0
  36. package/templates/api/src/boards/generators/implementations/fal/image/ideogram_character_edit.py +299 -0
  37. package/templates/api/src/boards/generators/implementations/fal/image/ideogram_v2.py +190 -0
  38. package/templates/api/src/boards/generators/implementations/fal/image/nano_banana_pro_edit.py +226 -0
  39. package/templates/api/src/boards/generators/implementations/fal/image/qwen_image.py +249 -0
  40. package/templates/api/src/boards/generators/implementations/fal/image/qwen_image_edit.py +244 -0
  41. package/templates/api/src/boards/generators/implementations/fal/video/__init__.py +42 -0
  42. package/templates/api/src/boards/generators/implementations/fal/video/bytedance_seedance_v1_pro_text_to_video.py +209 -0
  43. package/templates/api/src/boards/generators/implementations/fal/video/creatify_lipsync.py +161 -0
  44. package/templates/api/src/boards/generators/implementations/fal/video/fal_bytedance_seedance_v1_pro_image_to_video.py +222 -0
  45. package/templates/api/src/boards/generators/implementations/fal/video/fal_minimax_hailuo_02_standard_text_to_video.py +152 -0
  46. package/templates/api/src/boards/generators/implementations/fal/video/fal_pixverse_lipsync.py +197 -0
  47. package/templates/api/src/boards/generators/implementations/fal/video/fal_sora_2_text_to_video.py +173 -0
  48. package/templates/api/src/boards/generators/implementations/fal/video/infinitalk.py +221 -0
  49. package/templates/api/src/boards/generators/implementations/fal/video/kling_video_v2_5_turbo_pro_image_to_video.py +175 -0
  50. package/templates/api/src/boards/generators/implementations/fal/video/minimax_hailuo_2_3_pro_image_to_video.py +153 -0
  51. package/templates/api/src/boards/generators/implementations/fal/video/sora2_image_to_video.py +172 -0
  52. package/templates/api/src/boards/generators/implementations/fal/video/sora_2_image_to_video_pro.py +175 -0
  53. package/templates/api/src/boards/generators/implementations/fal/video/sora_2_text_to_video_pro.py +163 -0
  54. package/templates/api/src/boards/generators/implementations/fal/video/sync_lipsync_v2_pro.py +155 -0
  55. package/templates/api/src/boards/generators/implementations/fal/video/veed_lipsync.py +174 -0
  56. package/templates/api/src/boards/generators/implementations/fal/video/veo3.py +194 -0
  57. package/templates/api/src/boards/generators/implementations/fal/video/veo31_first_last_frame_to_video.py +1 -1
  58. package/templates/api/src/boards/generators/implementations/fal/video/wan_pro_image_to_video.py +158 -0
  59. package/templates/api/src/boards/graphql/access_control.py +1 -1
  60. package/templates/api/src/boards/graphql/mutations/root.py +16 -4
  61. package/templates/api/src/boards/graphql/resolvers/board.py +0 -2
  62. package/templates/api/src/boards/graphql/resolvers/generation.py +10 -233
  63. package/templates/api/src/boards/graphql/resolvers/lineage.py +381 -0
  64. package/templates/api/src/boards/graphql/resolvers/upload.py +463 -0
  65. package/templates/api/src/boards/graphql/types/generation.py +62 -26
  66. package/templates/api/src/boards/middleware.py +1 -1
  67. package/templates/api/src/boards/storage/factory.py +2 -2
  68. package/templates/api/src/boards/tenant_isolation.py +9 -9
  69. package/templates/api/src/boards/workers/actors.py +10 -1
  70. package/templates/web/package.json +1 -1
  71. package/templates/web/src/app/boards/[boardId]/page.tsx +14 -5
  72. package/templates/web/src/app/lineage/[generationId]/page.tsx +233 -0
  73. package/templates/web/src/components/boards/ArtifactPreview.tsx +20 -1
  74. package/templates/web/src/components/boards/UploadArtifact.tsx +253 -0
package/templates/api/src/boards/generators/implementations/fal/image/flux_2.py
@@ -0,0 +1,203 @@
+"""
+fal.ai FLUX.2 [dev] text-to-image generator.
+
+High-quality image generation using fal.ai's FLUX.2 [dev] model from Black Forest Labs.
+Features enhanced realism, crisper text generation, and configurable acceleration.
+
+See: https://fal.ai/models/fal-ai/flux-2
+"""
+
+import os
+from typing import Literal
+
+from pydantic import BaseModel, Field
+
+from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+
+
+class Flux2Input(BaseModel):
+    """Input schema for FLUX.2 [dev] image generation."""
+
+    prompt: str = Field(description="The text prompt for image generation")
+    num_images: int = Field(
+        default=1,
+        ge=1,
+        le=4,
+        description="Number of images to generate in batch (max 4)",
+    )
+    image_size: Literal[
+        "square_hd",
+        "square",
+        "portrait_4_3",
+        "portrait_16_9",
+        "landscape_4_3",
+        "landscape_16_9",
+    ] = Field(
+        default="landscape_4_3",
+        description="Predefined image size/aspect ratio",
+    )
+    acceleration: Literal["none", "regular", "high"] = Field(
+        default="regular",
+        description="Processing speed level (none = highest quality, high = fastest)",
+    )
+    output_format: Literal["jpeg", "png", "webp"] = Field(
+        default="png",
+        description="Output image format",
+    )
+    enable_prompt_expansion: bool = Field(
+        default=False,
+        description="Enhance prompt automatically for better results",
+    )
+    enable_safety_checker: bool = Field(
+        default=True,
+        description="Enable safety checker to filter NSFW content",
+    )
+    guidance_scale: float = Field(
+        default=2.5,
+        ge=0.0,
+        le=20.0,
+        description="Adherence strength to input prompt (0-20)",
+    )
+    num_inference_steps: int = Field(
+        default=28,
+        ge=4,
+        le=50,
+        description="Number of inference steps (4-50, higher = better quality but slower)",
+    )
+    seed: int | None = Field(
+        default=None,
+        description="Random seed for reproducibility (optional)",
+    )
+    sync_mode: bool = Field(
+        default=True,
+        description="Use synchronous mode (wait for completion)",
+    )
+
+
+class FalFlux2Generator(BaseGenerator):
+    """FLUX.2 [dev] image generator using fal.ai."""
+
+    name = "fal-flux-2"
+    artifact_type = "image"
+    description = (
+        "Fal: FLUX.2 [dev] - enhanced realism, crisper text generation, "
+        "and native editing capabilities from Black Forest Labs"
+    )
+
+    def get_input_schema(self) -> type[Flux2Input]:
+        return Flux2Input
+
+    async def generate(
+        self, inputs: Flux2Input, context: GeneratorExecutionContext
+    ) -> GeneratorResult:
+        """Generate images using fal.ai FLUX.2 [dev] model."""
+        # Check for API key (fal-client uses FAL_KEY environment variable)
+        if not os.getenv("FAL_KEY"):
+            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
+
+        # Import fal_client
+        try:
+            import fal_client
+        except ImportError as e:
+            raise ImportError(
+                "fal.ai SDK is required for FalFlux2Generator. "
+                "Install with: pip install weirdfingers-boards[generators-fal]"
+            ) from e
+
+        # Prepare arguments for fal.ai API
+        arguments = {
+            "prompt": inputs.prompt,
+            "num_images": inputs.num_images,
+            "image_size": inputs.image_size,
+            "acceleration": inputs.acceleration,
+            "output_format": inputs.output_format,
+            "enable_prompt_expansion": inputs.enable_prompt_expansion,
+            "enable_safety_checker": inputs.enable_safety_checker,
+            "guidance_scale": inputs.guidance_scale,
+            "num_inference_steps": inputs.num_inference_steps,
+            "sync_mode": inputs.sync_mode,
+        }
+
+        # Add seed if provided
+        if inputs.seed is not None:
+            arguments["seed"] = inputs.seed
+
+        # Submit async job and get handler
+        handler = await fal_client.submit_async(
+            "fal-ai/flux-2",
+            arguments=arguments,
+        )
+
+        # Store the external job ID for tracking
+        await context.set_external_job_id(handler.request_id)
+
+        # Stream progress updates (sample every 3rd event to avoid spam)
+        from .....progress.models import ProgressUpdate
+
+        event_count = 0
+        async for event in handler.iter_events(with_logs=True):
+            event_count += 1
+
+            # Process every 3rd event to provide feedback without overwhelming
+            if event_count % 3 == 0:
+                # Extract logs if available
+                logs = getattr(event, "logs", None)
+                if logs:
+                    # Join log entries into a single message
+                    if isinstance(logs, list):
+                        message = " | ".join(str(log) for log in logs if log)
+                    else:
+                        message = str(logs)
+
+                    if message:
+                        await context.publish_progress(
+                            ProgressUpdate(
+                                job_id=handler.request_id,
+                                status="processing",
+                                progress=50.0,  # Approximate mid-point progress
+                                phase="processing",
+                                message=message,
+                            )
+                        )
+
+        # Get final result
+        result = await handler.get()
+
+        # Extract image URLs from result
+        # fal.ai returns: {"images": [{"url": "...", "width": ..., "height": ...}, ...]}
+        images = result.get("images", [])
+        if not images:
+            raise ValueError("No images returned from fal.ai API")
+
+        # Store each image using output_index
+        artifacts = []
+        for idx, image_data in enumerate(images):
+            image_url = image_data.get("url")
+            width = image_data.get("width", 1024)
+            height = image_data.get("height", 1024)
+
+            if not image_url:
+                raise ValueError(f"Image {idx} missing URL in fal.ai response")
+
+            # Store with appropriate output_index
+            artifact = await context.store_image_result(
+                storage_url=image_url,
+                format=inputs.output_format,
+                width=width,
+                height=height,
+                output_index=idx,
+            )
+            artifacts.append(artifact)
+
+        return GeneratorResult(outputs=artifacts)
+
+    async def estimate_cost(self, inputs: Flux2Input) -> float:
+        """Estimate cost for FLUX.2 [dev] generation.
+
+        FLUX.2 [dev] pricing is approximately $0.055 per image based on
+        typical FLUX model pricing. Cost varies slightly based on
+        acceleration level and inference steps.
+        """
+        # Approximate cost per image
+        cost_per_image = 0.055
+        return cost_per_image * inputs.num_images
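Note: because Flux2Input is a plain Pydantic model, the field constraints shown in the hunk above (for example, num_images capped at 4) are enforced at validation time before a job is submitted. A minimal sketch, assuming the Flux2Input class above is importable and Pydantic v2 is installed:

    from pydantic import ValidationError

    # Valid inputs: unspecified fields fall back to the defaults declared above
    # (guidance_scale=2.5, num_inference_steps=28, output_format="png", ...).
    inputs = Flux2Input(prompt="a lighthouse at dusk", num_images=2, image_size="square_hd")
    print(inputs.model_dump())

    # Invalid inputs are rejected before any fal.ai call is made.
    try:
        Flux2Input(prompt="too many", num_images=8)  # exceeds le=4
    except ValidationError as exc:
        print(exc.errors()[0]["msg"])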
package/templates/api/src/boards/generators/implementations/fal/image/flux_2_edit.py
@@ -0,0 +1,230 @@
+"""
+fal.ai FLUX.2 [dev] Edit image-to-image editing generator.
+
+Edit images using fal.ai's flux-2/edit model, enabling precise modifications
+using natural language descriptions and hex color control.
+Based on Black Forest Labs' FLUX.2 [dev] model.
+"""
+
+import os
+from typing import Literal
+
+from pydantic import BaseModel, Field
+
+from ....artifacts import ImageArtifact
+from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+
+
+class Flux2EditImageSize(BaseModel):
+    """Custom image size configuration."""
+
+    width: int = Field(ge=512, le=2048, description="Image width (512-2048)")
+    height: int = Field(ge=512, le=2048, description="Image height (512-2048)")
+
+
+class Flux2EditInput(BaseModel):
+    """Input schema for FLUX.2 [dev] Edit image editing.
+
+    Artifact fields (like image_sources) are automatically detected via type
+    introspection and resolved from generation IDs to ImageArtifact objects.
+    """
+
+    prompt: str = Field(
+        description="Editing instruction (e.g., 'Change his clothes to casual suit and tie')"
+    )
+    image_sources: list[ImageArtifact] = Field(
+        description="List of input images for editing (max 3 images)",
+        min_length=1,
+        max_length=3,
+    )
+    num_images: int = Field(
+        default=1,
+        ge=1,
+        le=4,
+        description="Number of output images to generate (1-4)",
+    )
+    image_size: (
+        Literal["square_hd", "portrait_4_3", "landscape_16_9"] | Flux2EditImageSize | None
+    ) = Field(
+        default=None,
+        description=(
+            "Output image size - predefined (square_hd, portrait_4_3, landscape_16_9) "
+            "or custom dimensions"
+        ),
+    )
+    acceleration: Literal["none", "regular", "high"] = Field(
+        default="regular",
+        description="Acceleration mode for generation speed/quality tradeoff",
+    )
+    num_inference_steps: int = Field(
+        default=28,
+        ge=4,
+        le=50,
+        description="Number of inference steps (4-50, higher = better quality but slower)",
+    )
+    output_format: Literal["jpeg", "png", "webp"] = Field(
+        default="png",
+        description="Output image format",
+    )
+    guidance_scale: float = Field(
+        default=2.5,
+        ge=0.0,
+        le=20.0,
+        description="Guidance scale for generation (0-20)",
+    )
+    seed: int | None = Field(
+        default=None,
+        description="Random seed for reproducible outputs",
+    )
+    enable_prompt_expansion: bool = Field(
+        default=False,
+        description="Enable automatic prompt expansion for better results",
+    )
+    enable_safety_checker: bool = Field(
+        default=True,
+        description="Enable safety checker to filter NSFW content",
+    )
+    sync_mode: bool = Field(
+        default=False,
+        description="If True, return data URI instead of URL (output won't be in request history)",
+    )
+
+
+class FalFlux2EditGenerator(BaseGenerator):
+    """FLUX.2 [dev] Edit image-to-image generator using fal.ai."""
+
+    name = "fal-flux-2-edit"
+    artifact_type = "image"
+    description = "Fal: FLUX.2 [dev] Edit - Precise image editing with natural language"
+
+    def get_input_schema(self) -> type[Flux2EditInput]:
+        return Flux2EditInput
+
+    async def generate(
+        self, inputs: Flux2EditInput, context: GeneratorExecutionContext
+    ) -> GeneratorResult:
+        """Edit images using fal.ai flux-2/edit model."""
+        # Check for API key (fal-client uses FAL_KEY environment variable)
+        if not os.getenv("FAL_KEY"):
+            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
+
+        # Import fal_client
+        try:
+            import fal_client
+        except ImportError as e:
+            raise ImportError(
+                "fal.ai SDK is required for FalFlux2EditGenerator. "
+                "Install with: pip install weirdfingers-boards[generators-fal]"
+            ) from e
+
+        # Upload image artifacts to Fal's public storage
+        from ..utils import upload_artifacts_to_fal
+
+        image_urls = await upload_artifacts_to_fal(inputs.image_sources, context)
+
+        # Prepare arguments for fal.ai API
+        arguments: dict[str, object] = {
+            "prompt": inputs.prompt,
+            "image_urls": image_urls,
+            "num_images": inputs.num_images,
+            "acceleration": inputs.acceleration,
+            "num_inference_steps": inputs.num_inference_steps,
+            "output_format": inputs.output_format,
+            "guidance_scale": inputs.guidance_scale,
+            "enable_prompt_expansion": inputs.enable_prompt_expansion,
+            "enable_safety_checker": inputs.enable_safety_checker,
+            "sync_mode": inputs.sync_mode,
+        }
+
+        # Add optional fields if provided
+        if inputs.image_size is not None:
+            if isinstance(inputs.image_size, str):
+                arguments["image_size"] = inputs.image_size
+            else:
+                # Custom size object
+                arguments["image_size"] = {
+                    "width": inputs.image_size.width,
+                    "height": inputs.image_size.height,
+                }
+
+        if inputs.seed is not None:
+            arguments["seed"] = inputs.seed
+
+        # Submit async job and get handler
+        handler = await fal_client.submit_async(
+            "fal-ai/flux-2/edit",
+            arguments=arguments,
+        )
+
+        # Store the external job ID for tracking
+        await context.set_external_job_id(handler.request_id)
+
+        # Stream progress updates (sample every 3rd event to avoid spam)
+        from .....progress.models import ProgressUpdate
+
+        event_count = 0
+        async for event in handler.iter_events(with_logs=True):
+            event_count += 1
+
+            # Process every 3rd event to provide feedback without overwhelming
+            if event_count % 3 == 0:
+                # Extract logs if available
+                logs = getattr(event, "logs", None)
+                if logs:
+                    # Join log entries into a single message
+                    if isinstance(logs, list):
+                        message = " | ".join(str(log) for log in logs if log)
+                    else:
+                        message = str(logs)
+
+                    if message:
+                        await context.publish_progress(
+                            ProgressUpdate(
+                                job_id=handler.request_id,
+                                status="processing",
+                                progress=50.0,  # Approximate mid-point progress
+                                phase="processing",
+                                message=message,
+                            )
+                        )
+
+        # Get final result
+        result = await handler.get()
+
+        # Extract image URLs from result
+        # fal.ai returns: {"images": [{"url": "...", "width": ..., "height": ...}, ...]}
+        images = result.get("images", [])
+        if not images:
+            raise ValueError("No images returned from fal.ai API")
+
+        # Store each image using output_index
+        artifacts = []
+        for idx, image_data in enumerate(images):
+            image_url_result = image_data.get("url")
+            width = image_data.get("width", 1024)
+            height = image_data.get("height", 1024)
+
+            if not image_url_result:
+                raise ValueError(f"Image {idx} missing URL in fal.ai response")
+
+            # Store with appropriate output_index
+            artifact = await context.store_image_result(
+                storage_url=image_url_result,
+                format=inputs.output_format,
+                width=width,
+                height=height,
+                output_index=idx,
+            )
+            artifacts.append(artifact)
+
+        return GeneratorResult(outputs=artifacts)
+
+    async def estimate_cost(self, inputs: Flux2EditInput) -> float:
+        """Estimate cost for FLUX.2 Edit generation.
+
+        FLUX.2 [dev] Edit is a premium image editing model. Estimated cost
+        is approximately $0.06 per image based on similar Flux models.
+        """
+        # Cost per image * number of images
+        cost_per_image = 0.06
+        return cost_per_image * inputs.num_images
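Note: the image_size field of Flux2EditInput accepts either a preset name or a Flux2EditImageSize object, and generate() above flattens the latter into a width/height dict before calling fal.ai. A self-contained sketch of that mapping, using a hypothetical stand-in model and helper (not part of the package):

    from pydantic import BaseModel, Field


    class CustomSize(BaseModel):  # local stand-in for Flux2EditImageSize above
        width: int = Field(ge=512, le=2048)
        height: int = Field(ge=512, le=2048)


    def image_size_argument(image_size: str | CustomSize | None) -> str | dict[str, int] | None:
        # Preset names pass through unchanged; custom sizes become a width/height dict,
        # mirroring the branch in FalFlux2EditGenerator.generate() above.
        if image_size is None:
            return None  # field is omitted from the request
        if isinstance(image_size, str):
            return image_size  # e.g. "landscape_16_9"
        return {"width": image_size.width, "height": image_size.height}


    assert image_size_argument("landscape_16_9") == "landscape_16_9"
    assert image_size_argument(CustomSize(width=1536, height=1024)) == {"width": 1536, "height": 1024}
    assert image_size_argument(None) is None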
package/templates/api/src/boards/generators/implementations/fal/image/flux_2_pro.py
@@ -0,0 +1,204 @@
+"""
+fal.ai FLUX.2 [pro] text-to-image generator.
+
+Production-optimized generation with professional quality out of the box.
+Studio-grade images through a streamlined pipeline that prioritizes consistency
+and speed over parameter tuning.
+
+Based on Fal AI's fal-ai/flux-2-pro model.
+See: https://fal.ai/models/fal-ai/flux-2-pro
+"""
+
+import os
+from typing import Literal
+
+from pydantic import BaseModel, Field
+
+from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+
+
+class Flux2ProInput(BaseModel):
+    """Input schema for FLUX.2 [pro] image generation.
+
+    Note: FLUX.2 [pro] is designed for zero-configuration quality with
+    no inference steps or guidance parameters to adjust.
+    """
+
+    prompt: str = Field(description="Text prompt for image generation")
+    image_size: Literal[
+        "square_hd",
+        "square",
+        "portrait_4_3",
+        "portrait_16_9",
+        "landscape_4_3",
+        "landscape_16_9",
+    ] = Field(
+        default="landscape_4_3",
+        description="Image size preset. Available presets: square_hd, square, "
+        "portrait_4_3, portrait_16_9, landscape_4_3, landscape_16_9",
+    )
+    output_format: Literal["jpeg", "png"] = Field(
+        default="jpeg",
+        description="Output image format. JPEG for optimized file sizes, PNG for lossless quality",
+    )
+    safety_tolerance: Literal["1", "2", "3", "4", "5"] = Field(
+        default="2",
+        description="Safety tolerance level (1 = most strict, 5 = most permissive)",
+    )
+    enable_safety_checker: bool = Field(
+        default=True,
+        description="Enable safety checker to filter unsafe content",
+    )
+    seed: int | None = Field(
+        default=None,
+        description="Random seed for reproducible generation (optional)",
+    )
+    sync_mode: bool = Field(
+        default=True,
+        description="Use synchronous mode (wait for completion)",
+    )
+
+
+# Approximate megapixels for each preset
+_SIZE_MEGAPIXELS = {
+    "square_hd": 1.5,  # ~1408x1408 (typical HD square)
+    "square": 1.0,  # ~1024x1024
+    "portrait_4_3": 1.0,  # ~768x1024
+    "portrait_16_9": 1.0,  # ~576x1024
+    "landscape_4_3": 1.0,  # ~1024x768
+    "landscape_16_9": 1.0,  # ~1024x576
+}
+
+
+class FalFlux2ProGenerator(BaseGenerator):
+    """FLUX.2 [pro] image generator using fal.ai.
+
+    Production-optimized generation with professional quality out of the box.
+    Zero-configuration quality with no inference steps or guidance parameters
+    to adjust. Predictable results across batch generations.
+    """
+
+    name = "fal-flux-2-pro"
+    artifact_type = "image"
+    description = "Fal: FLUX.2 [pro] - production-optimized text-to-image with studio-grade quality"
+
+    def get_input_schema(self) -> type[Flux2ProInput]:
+        return Flux2ProInput
+
+    async def generate(
+        self, inputs: Flux2ProInput, context: GeneratorExecutionContext
+    ) -> GeneratorResult:
+        """Generate image using fal.ai FLUX.2 [pro] model."""
+        # Check for API key (fal-client uses FAL_KEY environment variable)
+        if not os.getenv("FAL_KEY"):
+            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
+
+        # Import fal_client
+        try:
+            import fal_client
+        except ImportError as e:
+            raise ImportError(
+                "fal.ai SDK is required for FalFlux2ProGenerator. "
+                "Install with: pip install weirdfingers-boards[generators-fal]"
+            ) from e
+
+        # Prepare arguments for fal.ai API
+        arguments = {
+            "prompt": inputs.prompt,
+            "image_size": inputs.image_size,
+            "output_format": inputs.output_format,
+            "safety_tolerance": inputs.safety_tolerance,
+            "enable_safety_checker": inputs.enable_safety_checker,
+            "sync_mode": inputs.sync_mode,
+        }
+
+        # Add seed if provided
+        if inputs.seed is not None:
+            arguments["seed"] = inputs.seed
+
+        # Submit async job and get handler
+        handler = await fal_client.submit_async(
+            "fal-ai/flux-2-pro",
+            arguments=arguments,
+        )
+
+        # Store the external job ID for tracking
+        await context.set_external_job_id(handler.request_id)
+
+        # Stream progress updates (sample every 3rd event to avoid spam)
+        from .....progress.models import ProgressUpdate
+
+        event_count = 0
+        async for event in handler.iter_events(with_logs=True):
+            event_count += 1
+
+            # Process every 3rd event to provide feedback without overwhelming
+            if event_count % 3 == 0:
+                # Extract logs if available
+                logs = getattr(event, "logs", None)
+                if logs:
+                    # Join log entries into a single message
+                    if isinstance(logs, list):
+                        message = " | ".join(str(log) for log in logs if log)
+                    else:
+                        message = str(logs)
+
+                    if message:
+                        await context.publish_progress(
+                            ProgressUpdate(
+                                job_id=handler.request_id,
+                                status="processing",
+                                progress=50.0,  # Approximate mid-point progress
+                                phase="processing",
+                                message=message,
+                            )
+                        )
+
+        # Get final result
+        result = await handler.get()
+
+        # Extract image URLs from result
+        # fal.ai returns: {"images": [{"url": "...", "width": ..., "height": ...}, ...]}
+        images = result.get("images", [])
+        if not images:
+            raise ValueError("No images returned from fal.ai API")
+
+        # Store each image using output_index
+        artifacts = []
+        for idx, image_data in enumerate(images):
+            image_url = image_data.get("url")
+            width = image_data.get("width", 1024)
+            height = image_data.get("height", 1024)
+
+            if not image_url:
+                raise ValueError(f"Image {idx} missing URL in fal.ai response")
+
+            # Store with appropriate output_index
+            artifact = await context.store_image_result(
+                storage_url=image_url,
+                format=inputs.output_format,
+                width=width,
+                height=height,
+                output_index=idx,
+            )
+            artifacts.append(artifact)
+
+        return GeneratorResult(outputs=artifacts)
+
+    async def estimate_cost(self, inputs: Flux2ProInput) -> float:
+        """Estimate cost for FLUX.2 [pro] generation.
+
+        FLUX.2 [pro] billing is based on megapixels (rounded up):
+        - $0.03 for the first megapixel
+        - $0.015 per additional megapixel
+
+        For preset sizes, we estimate based on typical dimensions.
+        """
+        megapixels = _SIZE_MEGAPIXELS.get(inputs.image_size, 1.0)
+
+        # First megapixel is $0.03, each additional is $0.015
+        if megapixels <= 1:
+            return 0.03
+        else:
+            additional_megapixels = megapixels - 1
+            return 0.03 + (additional_megapixels * 0.015)
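Note: the estimate_cost() docstring above describes megapixel-based billing. A minimal restatement of that arithmetic, using the preset estimates from _SIZE_MEGAPIXELS (approximations taken from this diff, not official fal.ai pricing):

    import math


    def flux2_pro_cost(megapixels: float) -> float:
        # Mirrors estimate_cost() above: $0.03 for the first megapixel,
        # $0.015 for each additional megapixel.
        if megapixels <= 1:
            return 0.03
        return 0.03 + (megapixels - 1) * 0.015


    assert flux2_pro_cost(1.0) == 0.03                # "square" and the other ~1 MP presets
    assert math.isclose(flux2_pro_cost(1.5), 0.0375)  # "square_hd" per _SIZE_MEGAPIXELS above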