@weirdfingers/baseboards 0.5.2 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. package/README.md +4 -1
  2. package/dist/index.js +131 -11
  3. package/dist/index.js.map +1 -1
  4. package/package.json +1 -1
  5. package/templates/api/alembic/env.py +9 -1
  6. package/templates/api/alembic/versions/20250101_000000_initial_schema.py +107 -49
  7. package/templates/api/alembic/versions/20251022_174729_remove_provider_name_from_generations.py +7 -3
  8. package/templates/api/alembic/versions/20251023_165852_switch_to_declarative_base_and_mapping.py +57 -1
  9. package/templates/api/alembic/versions/20251202_000000_add_artifact_lineage.py +134 -0
  10. package/templates/api/alembic/versions/2025925_62735_add_seed_data_for_default_tenant.py +8 -5
  11. package/templates/api/config/generators.yaml +111 -0
  12. package/templates/api/src/boards/__init__.py +1 -1
  13. package/templates/api/src/boards/api/app.py +2 -1
  14. package/templates/api/src/boards/api/endpoints/tenant_registration.py +1 -1
  15. package/templates/api/src/boards/api/endpoints/uploads.py +150 -0
  16. package/templates/api/src/boards/auth/factory.py +1 -1
  17. package/templates/api/src/boards/dbmodels/__init__.py +8 -22
  18. package/templates/api/src/boards/generators/artifact_resolution.py +45 -12
  19. package/templates/api/src/boards/generators/implementations/fal/audio/__init__.py +16 -1
  20. package/templates/api/src/boards/generators/implementations/fal/audio/beatoven_music_generation.py +171 -0
  21. package/templates/api/src/boards/generators/implementations/fal/audio/beatoven_sound_effect_generation.py +167 -0
  22. package/templates/api/src/boards/generators/implementations/fal/audio/elevenlabs_sound_effects_v2.py +194 -0
  23. package/templates/api/src/boards/generators/implementations/fal/audio/elevenlabs_tts_eleven_v3.py +209 -0
  24. package/templates/api/src/boards/generators/implementations/fal/audio/fal_elevenlabs_tts_turbo_v2_5.py +206 -0
  25. package/templates/api/src/boards/generators/implementations/fal/audio/fal_minimax_speech_26_hd.py +237 -0
  26. package/templates/api/src/boards/generators/implementations/fal/audio/minimax_speech_2_6_turbo.py +1 -1
  27. package/templates/api/src/boards/generators/implementations/fal/image/__init__.py +30 -0
  28. package/templates/api/src/boards/generators/implementations/fal/image/clarity_upscaler.py +220 -0
  29. package/templates/api/src/boards/generators/implementations/fal/image/crystal_upscaler.py +173 -0
  30. package/templates/api/src/boards/generators/implementations/fal/image/fal_ideogram_character.py +227 -0
  31. package/templates/api/src/boards/generators/implementations/fal/image/flux_2.py +203 -0
  32. package/templates/api/src/boards/generators/implementations/fal/image/flux_2_edit.py +230 -0
  33. package/templates/api/src/boards/generators/implementations/fal/image/flux_2_pro.py +204 -0
  34. package/templates/api/src/boards/generators/implementations/fal/image/flux_2_pro_edit.py +221 -0
  35. package/templates/api/src/boards/generators/implementations/fal/image/gemini_25_flash_image.py +177 -0
  36. package/templates/api/src/boards/generators/implementations/fal/image/gpt_image_1_edit_image.py +182 -0
  37. package/templates/api/src/boards/generators/implementations/fal/image/gpt_image_1_mini.py +167 -0
  38. package/templates/api/src/boards/generators/implementations/fal/image/ideogram_character_edit.py +299 -0
  39. package/templates/api/src/boards/generators/implementations/fal/image/ideogram_v2.py +190 -0
  40. package/templates/api/src/boards/generators/implementations/fal/image/nano_banana_pro_edit.py +226 -0
  41. package/templates/api/src/boards/generators/implementations/fal/image/qwen_image.py +249 -0
  42. package/templates/api/src/boards/generators/implementations/fal/image/qwen_image_edit.py +244 -0
  43. package/templates/api/src/boards/generators/implementations/fal/video/__init__.py +42 -0
  44. package/templates/api/src/boards/generators/implementations/fal/video/bytedance_seedance_v1_pro_text_to_video.py +209 -0
  45. package/templates/api/src/boards/generators/implementations/fal/video/creatify_lipsync.py +161 -0
  46. package/templates/api/src/boards/generators/implementations/fal/video/fal_bytedance_seedance_v1_pro_image_to_video.py +222 -0
  47. package/templates/api/src/boards/generators/implementations/fal/video/fal_minimax_hailuo_02_standard_text_to_video.py +152 -0
  48. package/templates/api/src/boards/generators/implementations/fal/video/fal_pixverse_lipsync.py +197 -0
  49. package/templates/api/src/boards/generators/implementations/fal/video/fal_sora_2_text_to_video.py +173 -0
  50. package/templates/api/src/boards/generators/implementations/fal/video/infinitalk.py +221 -0
  51. package/templates/api/src/boards/generators/implementations/fal/video/kling_video_v2_5_turbo_pro_image_to_video.py +175 -0
  52. package/templates/api/src/boards/generators/implementations/fal/video/minimax_hailuo_2_3_pro_image_to_video.py +153 -0
  53. package/templates/api/src/boards/generators/implementations/fal/video/sora2_image_to_video.py +172 -0
  54. package/templates/api/src/boards/generators/implementations/fal/video/sora_2_image_to_video_pro.py +175 -0
  55. package/templates/api/src/boards/generators/implementations/fal/video/sora_2_text_to_video_pro.py +163 -0
  56. package/templates/api/src/boards/generators/implementations/fal/video/sync_lipsync_v2_pro.py +155 -0
  57. package/templates/api/src/boards/generators/implementations/fal/video/veed_lipsync.py +174 -0
  58. package/templates/api/src/boards/generators/implementations/fal/video/veo3.py +194 -0
  59. package/templates/api/src/boards/generators/implementations/fal/video/veo31_first_last_frame_to_video.py +1 -1
  60. package/templates/api/src/boards/generators/implementations/fal/video/wan_pro_image_to_video.py +158 -0
  61. package/templates/api/src/boards/graphql/access_control.py +1 -1
  62. package/templates/api/src/boards/graphql/mutations/root.py +16 -4
  63. package/templates/api/src/boards/graphql/resolvers/board.py +0 -2
  64. package/templates/api/src/boards/graphql/resolvers/generation.py +10 -233
  65. package/templates/api/src/boards/graphql/resolvers/lineage.py +381 -0
  66. package/templates/api/src/boards/graphql/resolvers/upload.py +463 -0
  67. package/templates/api/src/boards/graphql/types/generation.py +62 -26
  68. package/templates/api/src/boards/middleware.py +1 -1
  69. package/templates/api/src/boards/storage/factory.py +2 -2
  70. package/templates/api/src/boards/tenant_isolation.py +9 -9
  71. package/templates/api/src/boards/workers/actors.py +10 -1
  72. package/templates/web/package.json +1 -1
  73. package/templates/web/src/app/boards/[boardId]/page.tsx +14 -5
  74. package/templates/web/src/app/lineage/[generationId]/page.tsx +233 -0
  75. package/templates/web/src/components/boards/ArtifactPreview.tsx +20 -1
  76. package/templates/web/src/components/boards/UploadArtifact.tsx +253 -0
package/templates/api/src/boards/generators/implementations/fal/image/flux_2_pro_edit.py
@@ -0,0 +1,221 @@
+ """
+ fal.ai FLUX 2 Pro Edit generator.
+
+ Production-grade multi-reference image editing that combines up to 9 reference
+ images through a streamlined pipeline for professional image manipulation.
+ Supports natural language precision, explicit image indexing with @ symbol,
+ and zero-configuration workflow.
+
+ Based on Fal AI's fal-ai/flux-2-pro/edit model.
+ See: https://fal.ai/models/fal-ai/flux-2-pro/edit
+ """
+
+ import os
+ from typing import Literal
+
+ from pydantic import BaseModel, Field
+
+ from ....artifacts import ImageArtifact
+ from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+
+ # Image size presets supported by the API
+ ImageSizePreset = Literal[
+     "auto",
+     "square_hd",
+     "square",
+     "portrait_4_3",
+     "portrait_16_9",
+     "landscape_4_3",
+     "landscape_16_9",
+ ]
+
+
+ class Flux2ProEditInput(BaseModel):
+     """Input schema for FLUX 2 Pro Edit.
+
+     Artifact fields (like image_sources) are automatically detected via type
+     introspection and resolved from generation IDs to ImageArtifact objects.
+     """
+
+     prompt: str = Field(
+         description="The prompt to edit the images. Use @ symbol to reference "
+         "specific input images by index (e.g., '@1' for first image)."
+     )
+     image_sources: list[ImageArtifact] = Field(
+         description="List of input images for editing (up to 9 images, 9 MP total)",
+         min_length=1,
+         max_length=9,
+     )
+     image_size: ImageSizePreset | None = Field(
+         default="auto",
+         description="The size of the generated image. If 'auto', the size will be "
+         "determined by the model based on input images.",
+     )
+     output_format: Literal["jpeg", "png"] = Field(
+         default="jpeg",
+         description="The format of the generated image.",
+     )
+     sync_mode: bool = Field(
+         default=False,
+         description=(
+             "If True, the media will be returned as a data URI and the output "
+             "data won't be available in the request history."
+         ),
+     )
+     safety_tolerance: Literal[1, 2, 3, 4, 5] = Field(
+         default=2,
+         description=(
+             "The safety tolerance level for the generated image. "
+             "1 is most strict, 5 is most permissive."
+         ),
+     )
+     enable_safety_checker: bool = Field(
+         default=True,
+         description="Whether to enable the safety checker.",
+     )
+     seed: int | None = Field(
+         default=None,
+         description="The seed to use for the generation. Leave empty for random.",
+     )
+
+
+ class FalFlux2ProEditGenerator(BaseGenerator):
+     """FLUX 2 Pro Edit image generator using fal.ai.
+
+     Production-grade multi-reference editing that combines up to 9 reference
+     images through a streamlined pipeline. Supports natural language precision
+     for describing complex edits without masks.
+     """
+
+     name = "fal-flux-2-pro-edit"
+     artifact_type = "image"
+     description = "Fal: FLUX 2 Pro Edit - Production-grade multi-reference image editing"
+
+     def get_input_schema(self) -> type[Flux2ProEditInput]:
+         return Flux2ProEditInput
+
+     async def generate(
+         self, inputs: Flux2ProEditInput, context: GeneratorExecutionContext
+     ) -> GeneratorResult:
+         """Edit images using fal.ai FLUX 2 Pro Edit model."""
+         # Check for API key (fal-client uses FAL_KEY environment variable)
+         if not os.getenv("FAL_KEY"):
+             raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
+
+         # Import fal_client
+         try:
+             import fal_client
+         except ImportError as e:
+             raise ImportError(
+                 "fal.ai SDK is required for FalFlux2ProEditGenerator. "
+                 "Install with: pip install weirdfingers-boards[generators-fal]"
+             ) from e
+
+         # Upload image artifacts to Fal's public storage
+         # Fal API requires publicly accessible URLs, but our storage_url might be:
+         # - Localhost URLs (not publicly accessible)
+         # - Private S3 buckets (not publicly accessible)
+         # So we upload to Fal's temporary storage first
+         from ..utils import upload_artifacts_to_fal
+
+         image_urls = await upload_artifacts_to_fal(inputs.image_sources, context)
+
+         # Prepare arguments for fal.ai API
+         arguments: dict = {
+             "prompt": inputs.prompt,
+             "image_urls": image_urls,
+             "output_format": inputs.output_format,
+             "sync_mode": inputs.sync_mode,
+             "safety_tolerance": inputs.safety_tolerance,
+             "enable_safety_checker": inputs.enable_safety_checker,
+         }
+
+         # Add optional parameters
+         if inputs.image_size is not None:
+             arguments["image_size"] = inputs.image_size
+         if inputs.seed is not None:
+             arguments["seed"] = inputs.seed
+
+         # Submit async job and get handler
+         handler = await fal_client.submit_async(
+             "fal-ai/flux-2-pro/edit",
+             arguments=arguments,
+         )
+
+         # Store the external job ID for tracking
+         await context.set_external_job_id(handler.request_id)
+
+         # Stream progress updates (sample every 3rd event to avoid spam)
+         from .....progress.models import ProgressUpdate
+
+         event_count = 0
+         async for event in handler.iter_events(with_logs=True):
+             event_count += 1
+
+             # Process every 3rd event to provide feedback without overwhelming
+             if event_count % 3 == 0:
+                 # Extract logs if available
+                 logs = getattr(event, "logs", None)
+                 if logs:
+                     # Join log entries into a single message
+                     if isinstance(logs, list):
+                         message = " | ".join(str(log) for log in logs if log)
+                     else:
+                         message = str(logs)
+
+                     if message:
+                         await context.publish_progress(
+                             ProgressUpdate(
+                                 job_id=handler.request_id,
+                                 status="processing",
+                                 progress=50.0,  # Approximate mid-point progress
+                                 phase="processing",
+                                 message=message,
+                             )
+                         )
+
+         # Get final result
+         result = await handler.get()
+
+         # Extract image URLs from result
+         # fal.ai returns: {
+         #     "images": [{"url": "...", "width": ..., "height": ..., ...}, ...],
+         #     "seed": ...
+         # }
+         images = result.get("images", [])
+
+         if not images:
+             raise ValueError("No images returned from fal.ai API")
+
+         # Store each image using output_index
+         artifacts = []
+         for idx, image_data in enumerate(images):
+             image_url = image_data.get("url")
+             # Extract dimensions if available, otherwise use sensible defaults
+             width = image_data.get("width", 1024)
+             height = image_data.get("height", 1024)
+
+             if not image_url:
+                 raise ValueError(f"Image {idx} missing URL in fal.ai response")
+
+             # Store with appropriate output_index
+             artifact = await context.store_image_result(
+                 storage_url=image_url,
+                 format=inputs.output_format,
+                 width=width,
+                 height=height,
+                 output_index=idx,
+             )
+             artifacts.append(artifact)
+
+         return GeneratorResult(outputs=artifacts)
+
+     async def estimate_cost(self, inputs: Flux2ProEditInput) -> float:
+         """Estimate cost for FLUX 2 Pro Edit generation.
+
+         Pricing: $0.03 for first megapixel, $0.015 for additional megapixels.
+         Using base cost of $0.03 as default (1 MP output).
+         """
+         # Base cost per generation (1 megapixel default)
+         base_cost = 0.03
+         return base_cost
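
The new FLUX 2 Pro Edit generator resolves artifact-typed input fields (image_sources) from generation IDs before generate() runs, and exposes its Pydantic model via get_input_schema(). Below is a minimal sketch of inspecting that schema; it assumes the template's API package is importable as boards, with the module path inferred from the file path above, and is not part of the packaged code.

# Minimal sketch (not part of the package): inspect the input schema.
# Assumes the generated API template is on the Python path so that
# boards.generators.implementations.fal.image.flux_2_pro_edit imports.
from boards.generators.implementations.fal.image.flux_2_pro_edit import (
    FalFlux2ProEditGenerator,
)

generator = FalFlux2ProEditGenerator()
schema = generator.get_input_schema()

# Pydantic v2: list the declared fields; image_sources is the artifact-typed
# field the framework resolves from generation IDs before generate() is called.
print(list(schema.model_fields))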
package/templates/api/src/boards/generators/implementations/fal/image/gemini_25_flash_image.py
@@ -0,0 +1,177 @@
+ """
+ Google Gemini 2.5 Flash Image text-to-image generator.
+
+ Google's state-of-the-art image generation and editing model available through fal.ai.
+ Supports multiple aspect ratios and output formats with batch generation up to 4 images.
+
+ Based on Fal AI's fal-ai/gemini-25-flash-image model.
+ See: https://fal.ai/models/fal-ai/gemini-25-flash-image
+ """
+
+ import os
+ from typing import Literal
+
+ from pydantic import BaseModel, Field
+
+ from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+
+
+ class Gemini25FlashImageInput(BaseModel):
+     """Input schema for Gemini 2.5 Flash Image generation."""
+
+     prompt: str = Field(
+         description="Text prompt for image generation",
+         min_length=3,
+         max_length=5000,
+     )
+     num_images: int = Field(
+         default=1,
+         ge=1,
+         le=4,
+         description="Number of images to generate (max 4)",
+     )
+     aspect_ratio: Literal[
+         "21:9",
+         "16:9",
+         "3:2",
+         "4:3",
+         "5:4",
+         "1:1",
+         "4:5",
+         "3:4",
+         "2:3",
+         "9:16",
+     ] = Field(
+         default="1:1",
+         description="Image aspect ratio",
+     )
+     output_format: Literal["jpeg", "png", "webp"] = Field(
+         default="png",
+         description="Output image format",
+     )
+     sync_mode: bool = Field(
+         default=False,
+         description="Return media as data URI without request history storage",
+     )
+     limit_generations: bool = Field(
+         default=False,
+         description="Restrict to single generation per round (experimental)",
+     )
+
+
+ class FalGemini25FlashImageGenerator(BaseGenerator):
+     """Google Gemini 2.5 Flash Image generator using fal.ai."""
+
+     name = "fal-gemini-25-flash-image"
+     artifact_type = "image"
+     description = "Fal: Gemini 2.5 Flash Image - Google's state-of-the-art text-to-image generation"
+
+     def get_input_schema(self) -> type[Gemini25FlashImageInput]:
+         return Gemini25FlashImageInput
+
+     async def generate(
+         self, inputs: Gemini25FlashImageInput, context: GeneratorExecutionContext
+     ) -> GeneratorResult:
+         """Generate images using Google Gemini 2.5 Flash Image via fal.ai."""
+         # Check for API key (fal-client uses FAL_KEY environment variable)
+         if not os.getenv("FAL_KEY"):
+             raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
+
+         # Import fal_client
+         try:
+             import fal_client
+         except ImportError as e:
+             raise ImportError(
+                 "fal.ai SDK is required for FalGemini25FlashImageGenerator. "
+                 "Install with: pip install weirdfingers-boards[generators-fal]"
+             ) from e
+
+         # Prepare arguments for fal.ai API
+         arguments = {
+             "prompt": inputs.prompt,
+             "num_images": inputs.num_images,
+             "aspect_ratio": inputs.aspect_ratio,
+             "output_format": inputs.output_format,
+             "sync_mode": inputs.sync_mode,
+             "limit_generations": inputs.limit_generations,
+         }
+
+         # Submit async job and get handler
+         handler = await fal_client.submit_async(
+             "fal-ai/gemini-25-flash-image",
+             arguments=arguments,
+         )
+
+         # Store the external job ID for tracking
+         await context.set_external_job_id(handler.request_id)
+
+         # Stream progress updates (sample every 3rd event to avoid spam)
+         from .....progress.models import ProgressUpdate
+
+         event_count = 0
+         async for event in handler.iter_events(with_logs=True):
+             event_count += 1
+
+             # Process every 3rd event to provide feedback without overwhelming
+             if event_count % 3 == 0:
+                 # Extract logs if available
+                 logs = getattr(event, "logs", None)
+                 if logs:
+                     # Join log entries into a single message
+                     if isinstance(logs, list):
+                         message = " | ".join(str(log) for log in logs if log)
+                     else:
+                         message = str(logs)
+
+                     if message:
+                         await context.publish_progress(
+                             ProgressUpdate(
+                                 job_id=handler.request_id,
+                                 status="processing",
+                                 progress=50.0,  # Approximate mid-point progress
+                                 phase="processing",
+                                 message=message,
+                             )
+                         )
+
+         # Get final result
+         result = await handler.get()
+
+         # Extract image URLs from result
+         # fal.ai returns: {"images": [{"url": "...", "width": ..., "height": ..., ...}, ...]}
+         images = result.get("images", [])
+         if not images:
+             raise ValueError("No images returned from fal.ai API")
+
+         # Store each image using output_index
+         artifacts = []
+         for idx, image_data in enumerate(images):
+             image_url = image_data.get("url")
+             # Use 'or' to handle explicit None values from API
+             width = image_data.get("width") or 1024
+             height = image_data.get("height") or 1024
+
+             if not image_url:
+                 raise ValueError(f"Image {idx} missing URL in fal.ai response")
+
+             # Store with appropriate output_index
+             artifact = await context.store_image_result(
+                 storage_url=image_url,
+                 format=inputs.output_format,
+                 width=width,
+                 height=height,
+                 output_index=idx,
+             )
+             artifacts.append(artifact)
+
+         return GeneratorResult(outputs=artifacts)
+
+     async def estimate_cost(self, inputs: Gemini25FlashImageInput) -> float:
+         """Estimate cost for Gemini 2.5 Flash Image generation.
+
+         TODO: Pricing information not available in fal.ai documentation.
+         This is a placeholder estimate that should be updated when pricing is known.
+         """
+         # Placeholder cost estimate per image (to be updated with actual pricing)
+         cost_per_image = 0.00  # Unknown pricing
+         return cost_per_image * inputs.num_images
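
The Gemini generator's estimate_cost is an explicit placeholder (0.00 per image) until fal.ai publishes pricing, so estimates currently come out as zero regardless of num_images. A minimal sketch of constructing inputs and calling the estimator, assuming the same boards package layout as above; this is illustrative only, not part of the package.

# Minimal sketch (not part of the package): build inputs and call the
# placeholder cost estimator. Assumes the boards template package is importable.
import asyncio

from boards.generators.implementations.fal.image.gemini_25_flash_image import (
    FalGemini25FlashImageGenerator,
    Gemini25FlashImageInput,
)

inputs = Gemini25FlashImageInput(
    prompt="A watercolor fox in a misty forest",
    num_images=2,
    aspect_ratio="3:2",
)

generator = FalGemini25FlashImageGenerator()
# Prints 0.0 until the placeholder cost_per_image is replaced with real pricing.
print(asyncio.run(generator.estimate_cost(inputs)))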
package/templates/api/src/boards/generators/implementations/fal/image/gpt_image_1_edit_image.py
@@ -0,0 +1,182 @@
+ """
+ fal.ai GPT-Image-1 image editing generator.
+
+ Edit images using OpenAI's GPT-Image-1 model via fal.ai.
+ Based on Fal AI's fal-ai/gpt-image-1/edit-image model.
+ See: https://fal.ai/models/fal-ai/gpt-image-1/edit-image
+ """
+
+ import os
+ from typing import Literal
+
+ from pydantic import BaseModel, Field
+
+ from ....artifacts import ImageArtifact
+ from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+
+
+ class GptImage1EditImageInput(BaseModel):
+     """Input schema for GPT-Image-1 image editing.
+
+     Artifact fields are automatically detected via type introspection
+     and resolved from generation IDs to artifact objects.
+     """
+
+     prompt: str = Field(
+         description="Edit instruction for transforming the input images",
+         min_length=1,
+         max_length=32000,
+     )
+     image_urls: list[ImageArtifact] = Field(
+         description="URLs of images to use as reference for editing",
+         min_length=1,
+     )
+     num_images: int = Field(
+         default=1,
+         ge=1,
+         le=4,
+         description="Number of edited images to generate (1-4)",
+     )
+     image_size: Literal["auto", "1024x1024", "1536x1024", "1024x1536"] = Field(
+         default="auto",
+         description="Size of the output images",
+     )
+     input_fidelity: Literal["low", "high"] = Field(
+         default="low",
+         description="How closely to follow the input image",
+     )
+     quality: Literal["auto", "low", "medium", "high"] = Field(
+         default="auto",
+         description="Quality level of the output images",
+     )
+
+ class FalGptImage1EditImageGenerator(BaseGenerator):
+     """Generator for OpenAI's GPT-Image-1 image editing via fal.ai."""
+
+     name = "fal-gpt-image-1-edit-image"
+     description = "Fal: GPT-Image-1 Edit - OpenAI's image editing model"
+     artifact_type = "image"
+
+     def get_input_schema(self) -> type[GptImage1EditImageInput]:
+         """Return the input schema for this generator."""
+         return GptImage1EditImageInput
+
+     async def generate(
+         self, inputs: GptImage1EditImageInput, context: GeneratorExecutionContext
+     ) -> GeneratorResult:
+         """Generate edited images using fal.ai GPT-Image-1."""
+         # Check for API key
+         if not os.getenv("FAL_KEY"):
+             raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
+
+         # Import fal_client
+         try:
+             import fal_client
+         except ImportError as e:
+             raise ImportError(
+                 "fal.ai SDK is required for FalGptImage1EditImageGenerator. "
+                 "Install with: pip install weirdfingers-boards[generators-fal]"
+             ) from e
+
+         # Upload image artifacts to Fal's public storage
+         # Fal API requires publicly accessible URLs
+         from ..utils import upload_artifacts_to_fal
+
+         image_urls = await upload_artifacts_to_fal(inputs.image_urls, context)
+
+         # Prepare arguments for fal.ai API
+         arguments = {
+             "prompt": inputs.prompt,
+             "image_urls": image_urls,
+             "num_images": inputs.num_images,
+             "image_size": inputs.image_size,
+             "input_fidelity": inputs.input_fidelity,
+             "quality": inputs.quality,
+         }
+
+         # Submit async job
+         handler = await fal_client.submit_async(
+             "fal-ai/gpt-image-1/edit-image",
+             arguments=arguments,
+         )
+
+         # Store external job ID
+         await context.set_external_job_id(handler.request_id)
+
+         # Stream progress updates
+         from .....progress.models import ProgressUpdate
+
+         event_count = 0
+         async for event in handler.iter_events(with_logs=True):
+             event_count += 1
+             # Sample every 3rd event to avoid spam
+             if event_count % 3 == 0:
+                 logs = getattr(event, "logs", None)
+                 if logs:
+                     # Join log entries into a single message
+                     if isinstance(logs, list):
+                         message = " | ".join(str(log) for log in logs if log)
+                     else:
+                         message = str(logs)
+
+                     if message:
+                         await context.publish_progress(
+                             ProgressUpdate(
+                                 job_id=handler.request_id,
+                                 status="processing",
+                                 progress=50.0,
+                                 phase="processing",
+                                 message=message,
+                             )
+                         )
+
+         # Get final result
+         result = await handler.get()
+
+         # Extract images from result
+         # Response structure: {"images": [{"url": "...", "width": 1024, "height": 1024, ...}, ...]}
+         images = result.get("images", [])
+
+         if not images:
+             raise ValueError("No images returned from fal.ai API")
+
+         # Store each image using output_index
+         artifacts = []
+         for idx, image_data in enumerate(images):
+             image_url = image_data.get("url")
+             width = image_data.get("width", 1024)
+             height = image_data.get("height", 1024)
+
+             if not image_url:
+                 raise ValueError(f"Image {idx} missing URL in fal.ai response")
+
+             # Determine format from image_size or content_type
+             format = "png"  # GPT-Image-1 typically returns PNG
+             if "content_type" in image_data:
+                 content_type = image_data["content_type"]
+                 if "jpeg" in content_type:
+                     format = "jpeg"
+                 elif "webp" in content_type:
+                     format = "webp"
+
+             artifact = await context.store_image_result(
+                 storage_url=image_url,
+                 format=format,
+                 width=width,
+                 height=height,
+                 output_index=idx,
+             )
+             artifacts.append(artifact)
+
+         return GeneratorResult(outputs=artifacts)
+
+     async def estimate_cost(self, inputs: GptImage1EditImageInput) -> float:
+         """Estimate cost for GPT-Image-1 edit generation.
+
+         Note: Pricing information not available in documentation.
+         Using estimated cost based on similar OpenAI image models.
+         """
+         # Estimated cost per image (similar to other image editing models)
+         per_image_cost = 0.04
+         return per_image_cost * inputs.num_images
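
The GPT-Image-1 edit generator defaults the stored format to PNG and only switches to jpeg or webp when the response entry carries a content_type hint. Below is the same branch pulled out as a standalone sketch; the sample payloads are hypothetical, and this snippet is not part of the package.

# Standalone sketch (not part of the package) of the content_type -> format
# fallback used above. Real entries come from the "images" list in the
# fal.ai response; these dicts are made-up examples.
def detect_format(image_data: dict) -> str:
    fmt = "png"  # GPT-Image-1 typically returns PNG
    content_type = image_data.get("content_type", "")
    if "jpeg" in content_type:
        fmt = "jpeg"
    elif "webp" in content_type:
        fmt = "webp"
    return fmt


print(detect_format({"url": "https://example.com/edit.webp", "content_type": "image/webp"}))  # webp
print(detect_format({"url": "https://example.com/edit.png"}))  # png (no content_type hint)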