@weirdfingers/baseboards 0.6.2 → 0.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57) hide show
  1. package/dist/index.js +54 -28
  2. package/dist/index.js.map +1 -1
  3. package/package.json +1 -1
  4. package/templates/README.md +2 -0
  5. package/templates/api/.env.example +3 -0
  6. package/templates/api/config/generators.yaml +58 -0
  7. package/templates/api/pyproject.toml +1 -1
  8. package/templates/api/src/boards/__init__.py +1 -1
  9. package/templates/api/src/boards/api/endpoints/storage.py +85 -4
  10. package/templates/api/src/boards/api/endpoints/uploads.py +1 -2
  11. package/templates/api/src/boards/database/connection.py +98 -58
  12. package/templates/api/src/boards/generators/implementations/fal/audio/__init__.py +4 -0
  13. package/templates/api/src/boards/generators/implementations/fal/audio/chatterbox_text_to_speech.py +176 -0
  14. package/templates/api/src/boards/generators/implementations/fal/audio/chatterbox_tts_turbo.py +195 -0
  15. package/templates/api/src/boards/generators/implementations/fal/image/__init__.py +14 -0
  16. package/templates/api/src/boards/generators/implementations/fal/image/bytedance_seedream_v45_edit.py +219 -0
  17. package/templates/api/src/boards/generators/implementations/fal/image/gemini_25_flash_image_edit.py +208 -0
  18. package/templates/api/src/boards/generators/implementations/fal/image/gpt_image_15_edit.py +216 -0
  19. package/templates/api/src/boards/generators/implementations/fal/image/gpt_image_1_5.py +177 -0
  20. package/templates/api/src/boards/generators/implementations/fal/image/reve_edit.py +178 -0
  21. package/templates/api/src/boards/generators/implementations/fal/image/reve_text_to_image.py +155 -0
  22. package/templates/api/src/boards/generators/implementations/fal/image/seedream_v45_text_to_image.py +180 -0
  23. package/templates/api/src/boards/generators/implementations/fal/video/__init__.py +18 -0
  24. package/templates/api/src/boards/generators/implementations/fal/video/kling_video_ai_avatar_v2_pro.py +168 -0
  25. package/templates/api/src/boards/generators/implementations/fal/video/kling_video_ai_avatar_v2_standard.py +159 -0
  26. package/templates/api/src/boards/generators/implementations/fal/video/veed_fabric_1_0.py +180 -0
  27. package/templates/api/src/boards/generators/implementations/fal/video/veo31.py +190 -0
  28. package/templates/api/src/boards/generators/implementations/fal/video/veo31_fast.py +190 -0
  29. package/templates/api/src/boards/generators/implementations/fal/video/veo31_fast_image_to_video.py +191 -0
  30. package/templates/api/src/boards/generators/implementations/fal/video/veo31_first_last_frame_to_video.py +13 -6
  31. package/templates/api/src/boards/generators/implementations/fal/video/wan_25_preview_image_to_video.py +212 -0
  32. package/templates/api/src/boards/generators/implementations/fal/video/wan_25_preview_text_to_video.py +208 -0
  33. package/templates/api/src/boards/generators/implementations/kie/__init__.py +11 -0
  34. package/templates/api/src/boards/generators/implementations/kie/base.py +316 -0
  35. package/templates/api/src/boards/generators/implementations/kie/image/__init__.py +3 -0
  36. package/templates/api/src/boards/generators/implementations/kie/image/nano_banana_edit.py +190 -0
  37. package/templates/api/src/boards/generators/implementations/kie/utils.py +98 -0
  38. package/templates/api/src/boards/generators/implementations/kie/video/__init__.py +8 -0
  39. package/templates/api/src/boards/generators/implementations/kie/video/veo3.py +161 -0
  40. package/templates/api/src/boards/graphql/resolvers/upload.py +1 -1
  41. package/templates/web/package.json +4 -1
  42. package/templates/web/src/app/boards/[boardId]/page.tsx +156 -24
  43. package/templates/web/src/app/globals.css +3 -0
  44. package/templates/web/src/app/layout.tsx +15 -5
  45. package/templates/web/src/components/boards/ArtifactInputSlots.tsx +9 -9
  46. package/templates/web/src/components/boards/ArtifactPreview.tsx +34 -18
  47. package/templates/web/src/components/boards/GenerationGrid.tsx +101 -7
  48. package/templates/web/src/components/boards/GenerationInput.tsx +21 -21
  49. package/templates/web/src/components/boards/GeneratorSelector.tsx +232 -30
  50. package/templates/web/src/components/boards/UploadArtifact.tsx +385 -75
  51. package/templates/web/src/components/header.tsx +3 -1
  52. package/templates/web/src/components/theme-provider.tsx +10 -0
  53. package/templates/web/src/components/theme-toggle.tsx +75 -0
  54. package/templates/web/src/components/ui/alert-dialog.tsx +157 -0
  55. package/templates/web/src/components/ui/toast.tsx +128 -0
  56. package/templates/web/src/components/ui/toaster.tsx +35 -0
  57. package/templates/web/src/components/ui/use-toast.ts +186 -0
@@ -0,0 +1,216 @@
1
+ """
2
+ fal.ai GPT-Image-1.5 image editing generator.
3
+
4
+ Edit images using OpenAI's GPT-Image-1.5 model via fal.ai.
5
+ Based on Fal AI's fal-ai/gpt-image-1.5/edit model.
6
+ See: https://fal.ai/models/fal-ai/gpt-image-1.5/edit
7
+ """
8
+
9
+ import os
10
+ from typing import Literal
11
+
12
+ from pydantic import BaseModel, Field
13
+
14
+ from ....artifacts import ImageArtifact
15
+ from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
16
+
17
+
18
class GptImage15EditInput(BaseModel):
    """Input schema for GPT-Image-1.5 image editing.

    Artifact fields are automatically detected via type introspection
    and resolved from generation IDs to artifact objects.
    """

    # Required edit instruction; length bounds mirror the fal.ai API contract.
    prompt: str = Field(
        description="Edit instruction for transforming the input images",
        min_length=2,
        max_length=32000,
    )
    # At least one reference image is required; resolved to ImageArtifact
    # objects by the framework before generate() runs.
    image_urls: list[ImageArtifact] = Field(
        description="URLs of images to use as reference for editing",
        min_length=1,
    )
    # Optional mask restricting which region of the image may be edited.
    mask_image_url: ImageArtifact | None = Field(
        default=None,
        description="Optional mask image to specify the area to edit",
    )
    num_images: int = Field(
        default=1,
        ge=1,
        le=4,
        description="Number of edited images to generate (1-4)",
    )
    # "auto" lets the model choose output dimensions from the inputs.
    image_size: Literal["auto", "1024x1024", "1536x1024", "1024x1536"] = Field(
        default="auto",
        description="Size of the output images",
    )
    quality: Literal["low", "medium", "high"] = Field(
        default="high",
        description="Quality level of the output images",
    )
    # High fidelity keeps the output closer to the source image(s).
    input_fidelity: Literal["low", "high"] = Field(
        default="high",
        description="How closely to follow the input image",
    )
    output_format: Literal["jpeg", "png", "webp"] = Field(
        default="png",
        description="Output image format",
    )
    background: Literal["auto", "transparent", "opaque"] = Field(
        default="auto",
        description="Background handling for the output images",
    )
64
+
65
+
66
class FalGptImage15EditGenerator(BaseGenerator):
    """Generator for OpenAI's GPT-Image-1.5 image editing via fal.ai."""

    name = "fal-gpt-image-15-edit"
    description = "Fal: GPT-Image-1.5 Edit - OpenAI's latest image editing model"
    artifact_type = "image"

    def get_input_schema(self) -> type[GptImage15EditInput]:
        """Return the input schema for this generator."""
        return GptImage15EditInput

    @staticmethod
    def _detect_format(image_data: dict, default: str) -> str:
        """Determine the image format for one fal.ai result entry.

        Prefers the ``content_type`` reported by the API (e.g.
        ``"image/jpeg"``); falls back to *default* (the requested output
        format) when no known format name appears in the content type.
        """
        content_type = image_data.get("content_type")
        if content_type:
            for fmt in ("jpeg", "webp", "png"):
                if fmt in content_type:
                    return fmt
        return default

    async def generate(
        self, inputs: GptImage15EditInput, context: GeneratorExecutionContext
    ) -> GeneratorResult:
        """Generate edited images using fal.ai GPT-Image-1.5.

        Uploads input artifacts to Fal storage, submits an async job,
        streams sampled progress updates, then stores each returned image
        as an output artifact.

        Raises:
            ValueError: if FAL_KEY is unset or the API returns no images.
            ImportError: if the fal-client SDK is not installed.
        """
        # Check for API key (fal-client reads FAL_KEY from the environment)
        if not os.getenv("FAL_KEY"):
            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")

        # Import fal_client lazily so the package imports without the optional SDK
        try:
            import fal_client
        except ImportError as e:
            raise ImportError(
                "fal.ai SDK is required for FalGptImage15EditGenerator. "
                "Install with: pip install weirdfingers-boards[generators-fal]"
            ) from e

        # Upload image artifacts to Fal's public storage: the Fal API requires
        # publicly accessible URLs, while our storage may be local or private.
        from ..utils import upload_artifacts_to_fal

        image_urls = await upload_artifacts_to_fal(inputs.image_urls, context)

        # Upload mask image if provided
        mask_image_url = None
        if inputs.mask_image_url is not None:
            mask_urls = await upload_artifacts_to_fal([inputs.mask_image_url], context)
            mask_image_url = mask_urls[0] if mask_urls else None

        # Prepare arguments for fal.ai API
        arguments: dict = {
            "prompt": inputs.prompt,
            "image_urls": image_urls,
            "num_images": inputs.num_images,
            "image_size": inputs.image_size,
            "quality": inputs.quality,
            "input_fidelity": inputs.input_fidelity,
            "output_format": inputs.output_format,
            "background": inputs.background,
        }

        # The mask is optional in the API; only send it when supplied
        if mask_image_url is not None:
            arguments["mask_image_url"] = mask_image_url

        # Submit async job
        handler = await fal_client.submit_async(
            "fal-ai/gpt-image-1.5/edit",
            arguments=arguments,
        )

        # Store external job ID so the job can be tracked
        await context.set_external_job_id(handler.request_id)

        # Stream progress updates
        from .....progress.models import ProgressUpdate

        event_count = 0
        async for event in handler.iter_events(with_logs=True):
            event_count += 1
            # Sample every 3rd event to avoid spam
            if event_count % 3 == 0:
                logs = getattr(event, "logs", None)
                if logs:
                    # Join log entries into a single message
                    if isinstance(logs, list):
                        message = " | ".join(str(log) for log in logs if log)
                    else:
                        message = str(logs)

                    if message:
                        await context.publish_progress(
                            ProgressUpdate(
                                job_id=handler.request_id,
                                status="processing",
                                progress=50.0,  # approximate; API gives no % signal
                                phase="processing",
                                message=message,
                            )
                        )

        # Get final result
        result = await handler.get()

        # Response structure:
        # {"images": [{"url": "...", "width": 1024, "height": 1024, ...}, ...]}
        images = result.get("images", [])

        if not images:
            raise ValueError("No images returned from fal.ai API")

        # Store each image using output_index
        artifacts = []
        for idx, image_data in enumerate(images):
            image_url = image_data.get("url")
            width = image_data.get("width", 1024)
            height = image_data.get("height", 1024)

            if not image_url:
                raise ValueError(f"Image {idx} missing URL in fal.ai response")

            # Named `image_format` (not `format`) to avoid shadowing the builtin
            image_format = self._detect_format(image_data, inputs.output_format)

            artifact = await context.store_image_result(
                storage_url=image_url,
                format=image_format,
                width=width,
                height=height,
                output_index=idx,
            )
            artifacts.append(artifact)

        return GeneratorResult(outputs=artifacts)

    async def estimate_cost(self, inputs: GptImage15EditInput) -> float:
        """Estimate cost for GPT-Image-1.5 edit generation.

        Pricing varies by quality and image size:
        - Low Quality: $0.009-$0.013 per image
        - Medium Quality: $0.034-$0.051 per image
        - High Quality: $0.133-$0.200 per image
        """
        # Mid-point of each documented per-image price range (1024x1024 reference)
        quality_costs = {
            "low": 0.011,  # Average of $0.009-$0.013
            "medium": 0.045,  # Average of $0.034-$0.051
            "high": 0.177,  # Average of $0.133-$0.200
        }

        per_image_cost = quality_costs.get(inputs.quality, 0.177)
        return per_image_cost * inputs.num_images
@@ -0,0 +1,177 @@
1
+ """
2
+ fal.ai GPT Image 1.5 text-to-image generator.
3
+
4
+ Generate high-fidelity images using GPT Image 1.5 with strong prompt adherence,
5
+ preserving composition, lighting, and fine-grained detail.
6
+
7
+ Based on Fal AI's fal-ai/gpt-image-1.5 model.
8
+ See: https://fal.ai/models/fal-ai/gpt-image-1.5
9
+ """
10
+
11
+ import os
12
+ from typing import Literal
13
+
14
+ from pydantic import BaseModel, Field
15
+
16
+ from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
17
+
18
+
19
class GptImage15Input(BaseModel):
    """Input schema for GPT Image 1.5.

    Artifact fields are automatically detected via type introspection
    and resolved from generation IDs to artifact objects.
    """

    # Text prompt driving the generation; no upper length bound is enforced here.
    prompt: str = Field(
        description="The prompt for image generation",
        min_length=2,
    )
    num_images: int = Field(
        default=1,
        ge=1,
        le=4,
        description="Number of images to generate",
    )
    # Fixed "WxH" sizes only (no "auto", unlike the edit variant); the
    # generator parses this string for fallback artifact dimensions.
    image_size: Literal["1024x1024", "1536x1024", "1024x1536"] = Field(
        default="1024x1024",
        description="Aspect ratio for the generated image",
    )
    background: Literal["auto", "transparent", "opaque"] = Field(
        default="auto",
        description="Background for the generated image",
    )
    quality: Literal["low", "medium", "high"] = Field(
        default="high",
        description="Quality for the generated image",
    )
    output_format: Literal["jpeg", "png", "webp"] = Field(
        default="png",
        description="Output format for the images",
    )
52
+
53
+
54
class FalGptImage15Generator(BaseGenerator):
    """GPT Image 1.5 text-to-image generator using fal.ai."""

    name = "fal-gpt-image-1-5"
    artifact_type = "image"
    description = "Fal: GPT Image 1.5 - High-fidelity image generation with strong prompt adherence"

    def get_input_schema(self) -> type[GptImage15Input]:
        return GptImage15Input

    async def generate(
        self, inputs: GptImage15Input, context: GeneratorExecutionContext
    ) -> GeneratorResult:
        """Generate images using fal.ai gpt-image-1.5 model."""
        # fal-client authenticates via the FAL_KEY environment variable
        if not os.getenv("FAL_KEY"):
            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")

        # The fal SDK is an optional dependency; surface an install hint if absent
        try:
            import fal_client
        except ImportError as e:
            raise ImportError(
                "fal.ai SDK is required for FalGptImage15Generator. "
                "Install with: pip install weirdfingers-boards[generators-fal]"
            ) from e

        # Request payload mirroring the model's documented parameters
        request_args = {
            "prompt": inputs.prompt,
            "num_images": inputs.num_images,
            "image_size": inputs.image_size,
            "background": inputs.background,
            "quality": inputs.quality,
            "output_format": inputs.output_format,
        }

        # Kick off the job asynchronously and keep the handle for polling
        job = await fal_client.submit_async(
            "fal-ai/gpt-image-1.5",
            arguments=request_args,
        )

        # Persist the provider-side job ID for external tracking
        await context.set_external_job_id(job.request_id)

        from .....progress.models import ProgressUpdate

        # Relay a sampled subset of log events as coarse progress updates
        seen = 0
        async for event in job.iter_events(with_logs=True):
            seen += 1
            if seen % 3 != 0:
                continue  # throttle: only forward every 3rd event

            logs = getattr(event, "logs", None)
            if not logs:
                continue

            message = (
                " | ".join(str(entry) for entry in logs if entry)
                if isinstance(logs, list)
                else str(logs)
            )
            if not message:
                continue

            await context.publish_progress(
                ProgressUpdate(
                    job_id=job.request_id,
                    status="processing",
                    progress=50.0,  # API exposes no percentage; report mid-point
                    phase="processing",
                    message=message,
                )
            )

        # Block until the job finishes and collect the response payload
        payload = await job.get()

        # Expected shape:
        # {"images": [{"url": "...", "content_type": "...", "width": ..., "height": ...}, ...]}
        image_entries = payload.get("images", [])
        if not image_entries:
            raise ValueError("No images returned from fal.ai API")

        # Fallback dimensions derived from the requested "WxH" size string
        target_width, target_height = (int(part) for part in inputs.image_size.split("x"))

        stored = []
        for index, entry in enumerate(image_entries):
            url = entry.get("url")
            if not url:
                raise ValueError(f"Image {index} missing URL in fal.ai response")

            # Use reported dimensions when present, otherwise the requested size
            artifact = await context.store_image_result(
                storage_url=url,
                format=inputs.output_format,
                width=entry.get("width") or target_width,
                height=entry.get("height") or target_height,
                output_index=index,
            )
            stored.append(artifact)

        return GeneratorResult(outputs=stored)

    async def estimate_cost(self, inputs: GptImage15Input) -> float:
        """Estimate cost for GPT Image 1.5 generation.

        Using estimated cost per image (pricing not documented).
        GPT Image 1.5 is a higher-quality model compared to mini version.
        """
        # Flat estimated per-image cost
        return 0.04 * inputs.num_images
@@ -0,0 +1,178 @@
1
+ """
2
+ fal.ai Reve image editing generator.
3
+
4
+ Edit images using fal.ai's Reve edit model.
5
+ Allows uploading an existing image and transforming it through text prompts.
6
+
7
+ See: https://fal.ai/models/fal-ai/reve/edit
8
+ """
9
+
10
+ import os
11
+ from typing import Literal
12
+
13
+ from pydantic import BaseModel, Field
14
+
15
+ from ....artifacts import ImageArtifact
16
+ from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
17
+
18
+
19
class ReveEditInput(BaseModel):
    """Input schema for Reve image editing.

    Artifact fields (like image_url) are automatically detected via type
    introspection and resolved from generation IDs to ImageArtifact objects.
    """

    # Edit instruction; max length mirrors the fal.ai Reve API limit.
    prompt: str = Field(
        description="Text describing how to edit the image",
        min_length=1,
        max_length=2560,
    )
    # Single source image (unlike the multi-image GPT edit generator).
    image_url: ImageArtifact = Field(
        description="Reference image to edit (from a previous generation)",
    )
    num_images: int = Field(
        default=1,
        ge=1,
        le=4,
        description="Number of images to generate",
    )
    output_format: Literal["png", "jpeg", "webp"] = Field(
        default="png",
        description="Output image format",
    )
    # Passed straight through to the fal.ai API.
    sync_mode: bool = Field(
        default=False,
        description=(
            "If True, the media will be returned as a data URI and the output "
            "data won't be available in the request history"
        ),
    )
51
+
52
+
53
class FalReveEditGenerator(BaseGenerator):
    """Reve image editing generator using fal.ai."""

    name = "fal-reve-edit"
    artifact_type = "image"
    description = "Fal: Reve edit - AI-powered image editing and transformation"

    def get_input_schema(self) -> type[ReveEditInput]:
        return ReveEditInput

    async def generate(
        self, inputs: ReveEditInput, context: GeneratorExecutionContext
    ) -> GeneratorResult:
        """Edit images using fal.ai Reve edit model."""
        # fal-client authenticates via the FAL_KEY environment variable
        if not os.getenv("FAL_KEY"):
            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")

        # Optional dependency: give an actionable message when it's missing
        try:
            import fal_client
        except ImportError as e:
            raise ImportError(
                "fal.ai SDK is required for FalReveEditGenerator. "
                "Install with: pip install weirdfingers-boards[generators-fal]"
            ) from e

        # Fal needs a publicly reachable URL; our artifact storage may be a
        # localhost URL or a private bucket, so push the image to Fal's
        # temporary storage first.
        from ..utils import upload_artifacts_to_fal

        uploaded = await upload_artifacts_to_fal([inputs.image_url], context)
        source_url = uploaded[0]

        # Request payload for the Reve edit endpoint
        request_args = {
            "prompt": inputs.prompt,
            "image_url": source_url,
            "num_images": inputs.num_images,
            "output_format": inputs.output_format,
            "sync_mode": inputs.sync_mode,
        }

        # Submit the job and keep the handle for event streaming / result fetch
        job = await fal_client.submit_async(
            "fal-ai/reve/edit",
            arguments=request_args,
        )

        # Persist the provider-side job ID for external tracking
        await context.set_external_job_id(job.request_id)

        from .....progress.models import ProgressUpdate

        # Forward a sampled subset of log events as coarse progress updates
        seen = 0
        async for event in job.iter_events(with_logs=True):
            seen += 1
            if seen % 3 != 0:
                continue  # throttle: only forward every 3rd event

            logs = getattr(event, "logs", None)
            if not logs:
                continue

            message = (
                " | ".join(str(entry) for entry in logs if entry)
                if isinstance(logs, list)
                else str(logs)
            )
            if not message:
                continue

            await context.publish_progress(
                ProgressUpdate(
                    job_id=job.request_id,
                    status="processing",
                    progress=50.0,  # API exposes no percentage; report mid-point
                    phase="processing",
                    message=message,
                )
            )

        # Wait for completion and read the response payload
        payload = await job.get()

        # Expected shape: {"images": [{"url": "...", "width": ..., "height": ...}, ...]}
        entries = payload.get("images", [])
        if not entries:
            raise ValueError("No images returned from fal.ai API")

        results = []
        for index, entry in enumerate(entries):
            url = entry.get("url")
            if not url:
                raise ValueError(f"Image {index} missing URL in fal.ai response")

            artifact = await context.store_image_result(
                storage_url=url,
                format=inputs.output_format,
                # dimensions default to 1024 when the API omits them
                width=entry.get("width", 1024),
                height=entry.get("height", 1024),
                output_index=index,
            )
            results.append(artifact)

        return GeneratorResult(outputs=results)

    async def estimate_cost(self, inputs: ReveEditInput) -> float:
        """Estimate cost for Reve edit generation.

        Reve edit pricing is $0.04 per image generated.
        """
        # Flat per-image rate
        return 0.04 * inputs.num_images