@weirdfingers/baseboards 0.5.2 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. package/README.md +4 -1
  2. package/dist/index.js +131 -11
  3. package/dist/index.js.map +1 -1
  4. package/package.json +1 -1
  5. package/templates/api/alembic/env.py +9 -1
  6. package/templates/api/alembic/versions/20250101_000000_initial_schema.py +107 -49
  7. package/templates/api/alembic/versions/20251022_174729_remove_provider_name_from_generations.py +7 -3
  8. package/templates/api/alembic/versions/20251023_165852_switch_to_declarative_base_and_mapping.py +57 -1
  9. package/templates/api/alembic/versions/20251202_000000_add_artifact_lineage.py +134 -0
  10. package/templates/api/alembic/versions/2025925_62735_add_seed_data_for_default_tenant.py +8 -5
  11. package/templates/api/config/generators.yaml +111 -0
  12. package/templates/api/src/boards/__init__.py +1 -1
  13. package/templates/api/src/boards/api/app.py +2 -1
  14. package/templates/api/src/boards/api/endpoints/tenant_registration.py +1 -1
  15. package/templates/api/src/boards/api/endpoints/uploads.py +150 -0
  16. package/templates/api/src/boards/auth/factory.py +1 -1
  17. package/templates/api/src/boards/dbmodels/__init__.py +8 -22
  18. package/templates/api/src/boards/generators/artifact_resolution.py +45 -12
  19. package/templates/api/src/boards/generators/implementations/fal/audio/__init__.py +16 -1
  20. package/templates/api/src/boards/generators/implementations/fal/audio/beatoven_music_generation.py +171 -0
  21. package/templates/api/src/boards/generators/implementations/fal/audio/beatoven_sound_effect_generation.py +167 -0
  22. package/templates/api/src/boards/generators/implementations/fal/audio/elevenlabs_sound_effects_v2.py +194 -0
  23. package/templates/api/src/boards/generators/implementations/fal/audio/elevenlabs_tts_eleven_v3.py +209 -0
  24. package/templates/api/src/boards/generators/implementations/fal/audio/fal_elevenlabs_tts_turbo_v2_5.py +206 -0
  25. package/templates/api/src/boards/generators/implementations/fal/audio/fal_minimax_speech_26_hd.py +237 -0
  26. package/templates/api/src/boards/generators/implementations/fal/audio/minimax_speech_2_6_turbo.py +1 -1
  27. package/templates/api/src/boards/generators/implementations/fal/image/__init__.py +30 -0
  28. package/templates/api/src/boards/generators/implementations/fal/image/clarity_upscaler.py +220 -0
  29. package/templates/api/src/boards/generators/implementations/fal/image/crystal_upscaler.py +173 -0
  30. package/templates/api/src/boards/generators/implementations/fal/image/fal_ideogram_character.py +227 -0
  31. package/templates/api/src/boards/generators/implementations/fal/image/flux_2.py +203 -0
  32. package/templates/api/src/boards/generators/implementations/fal/image/flux_2_edit.py +230 -0
  33. package/templates/api/src/boards/generators/implementations/fal/image/flux_2_pro.py +204 -0
  34. package/templates/api/src/boards/generators/implementations/fal/image/flux_2_pro_edit.py +221 -0
  35. package/templates/api/src/boards/generators/implementations/fal/image/gemini_25_flash_image.py +177 -0
  36. package/templates/api/src/boards/generators/implementations/fal/image/gpt_image_1_edit_image.py +182 -0
  37. package/templates/api/src/boards/generators/implementations/fal/image/gpt_image_1_mini.py +167 -0
  38. package/templates/api/src/boards/generators/implementations/fal/image/ideogram_character_edit.py +299 -0
  39. package/templates/api/src/boards/generators/implementations/fal/image/ideogram_v2.py +190 -0
  40. package/templates/api/src/boards/generators/implementations/fal/image/nano_banana_pro_edit.py +226 -0
  41. package/templates/api/src/boards/generators/implementations/fal/image/qwen_image.py +249 -0
  42. package/templates/api/src/boards/generators/implementations/fal/image/qwen_image_edit.py +244 -0
  43. package/templates/api/src/boards/generators/implementations/fal/video/__init__.py +42 -0
  44. package/templates/api/src/boards/generators/implementations/fal/video/bytedance_seedance_v1_pro_text_to_video.py +209 -0
  45. package/templates/api/src/boards/generators/implementations/fal/video/creatify_lipsync.py +161 -0
  46. package/templates/api/src/boards/generators/implementations/fal/video/fal_bytedance_seedance_v1_pro_image_to_video.py +222 -0
  47. package/templates/api/src/boards/generators/implementations/fal/video/fal_minimax_hailuo_02_standard_text_to_video.py +152 -0
  48. package/templates/api/src/boards/generators/implementations/fal/video/fal_pixverse_lipsync.py +197 -0
  49. package/templates/api/src/boards/generators/implementations/fal/video/fal_sora_2_text_to_video.py +173 -0
  50. package/templates/api/src/boards/generators/implementations/fal/video/infinitalk.py +221 -0
  51. package/templates/api/src/boards/generators/implementations/fal/video/kling_video_v2_5_turbo_pro_image_to_video.py +175 -0
  52. package/templates/api/src/boards/generators/implementations/fal/video/minimax_hailuo_2_3_pro_image_to_video.py +153 -0
  53. package/templates/api/src/boards/generators/implementations/fal/video/sora2_image_to_video.py +172 -0
  54. package/templates/api/src/boards/generators/implementations/fal/video/sora_2_image_to_video_pro.py +175 -0
  55. package/templates/api/src/boards/generators/implementations/fal/video/sora_2_text_to_video_pro.py +163 -0
  56. package/templates/api/src/boards/generators/implementations/fal/video/sync_lipsync_v2_pro.py +155 -0
  57. package/templates/api/src/boards/generators/implementations/fal/video/veed_lipsync.py +174 -0
  58. package/templates/api/src/boards/generators/implementations/fal/video/veo3.py +194 -0
  59. package/templates/api/src/boards/generators/implementations/fal/video/veo31_first_last_frame_to_video.py +1 -1
  60. package/templates/api/src/boards/generators/implementations/fal/video/wan_pro_image_to_video.py +158 -0
  61. package/templates/api/src/boards/graphql/access_control.py +1 -1
  62. package/templates/api/src/boards/graphql/mutations/root.py +16 -4
  63. package/templates/api/src/boards/graphql/resolvers/board.py +0 -2
  64. package/templates/api/src/boards/graphql/resolvers/generation.py +10 -233
  65. package/templates/api/src/boards/graphql/resolvers/lineage.py +381 -0
  66. package/templates/api/src/boards/graphql/resolvers/upload.py +463 -0
  67. package/templates/api/src/boards/graphql/types/generation.py +62 -26
  68. package/templates/api/src/boards/middleware.py +1 -1
  69. package/templates/api/src/boards/storage/factory.py +2 -2
  70. package/templates/api/src/boards/tenant_isolation.py +9 -9
  71. package/templates/api/src/boards/workers/actors.py +10 -1
  72. package/templates/web/package.json +1 -1
  73. package/templates/web/src/app/boards/[boardId]/page.tsx +14 -5
  74. package/templates/web/src/app/lineage/[generationId]/page.tsx +233 -0
  75. package/templates/web/src/components/boards/ArtifactPreview.tsx +20 -1
  76. package/templates/web/src/components/boards/UploadArtifact.tsx +253 -0
@@ -0,0 +1,226 @@
1
+ """
2
+ fal.ai nano-banana-pro image editing generator.
3
+
4
+ Edit images using fal.ai's nano-banana-pro/edit model (powered by Google's latest
5
+ image generation and editing model). Specializes in realism and typography.
6
+
7
+ See: https://fal.ai/models/fal-ai/nano-banana-pro/edit
8
+ """
9
+
10
+ import os
11
+ from typing import Literal
12
+
13
+ from pydantic import BaseModel, Field
14
+
15
+ from ....artifacts import ImageArtifact
16
+ from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
17
+
18
+
19
class NanoBananaProEditInput(BaseModel):
    """Input schema for nano-banana-pro image editing.

    Artifact fields (like image_sources) are automatically detected via type
    introspection and resolved from generation IDs to ImageArtifact objects.
    """

    # Edit instruction, forwarded verbatim to the fal.ai API.
    prompt: str = Field(
        min_length=3,
        max_length=50000,
        description="The prompt for image editing",
    )
    # Source images to edit; at least one required. Callers supply generation
    # IDs which the framework resolves to ImageArtifact objects (see class docstring).
    image_sources: list[ImageArtifact] = Field(
        description="List of input images for editing (from previous generations)",
        min_length=1,
    )
    # Batch size; capped at 4 per request here.
    num_images: int = Field(
        default=1,
        ge=1,
        le=4,
        description="Number of images to generate",
    )
    # "auto" inherits the aspect ratio from one of the input images.
    aspect_ratio: Literal[
        "auto",
        "21:9",
        "16:9",
        "3:2",
        "4:3",
        "5:4",
        "1:1",
        "4:5",
        "3:4",
        "2:3",
        "9:16",
    ] = Field(
        default="auto",
        description=(
            "Aspect ratio for generated images. Default is 'auto', which takes one "
            "of the input images' aspect ratio"
        ),
    )
    # Output resolution tier as accepted by the fal endpoint.
    resolution: Literal["1K", "2K", "4K"] = Field(
        default="1K",
        description="Image resolution (1K, 2K, or 4K)",
    )
    output_format: Literal["jpeg", "png", "webp"] = Field(
        default="png",
        description="Output image format",
    )
    # When True, fal returns the media inline as a data URI instead of a
    # hosted URL (and omits it from the request history).
    sync_mode: bool = Field(
        default=False,
        description=(
            "If True, the media will be returned as a data URI and the output "
            "data won't be available in the request history"
        ),
    )
    enable_web_search: bool = Field(
        default=False,
        description="Enable web search for generating images with current information",
    )
    # Experimental fal-side flag; overrides prompt-embedded image-count instructions.
    limit_generations: bool = Field(
        default=False,
        description=(
            "Experimental parameter to limit the number of generations from each "
            "round of prompting to 1. Set to True to disregard any instructions in "
            "the prompt regarding the number of images to generate"
        ),
    )
87
+
88
+
89
class FalNanoBananaProEditGenerator(BaseGenerator):
    """Image-editing generator backed by fal.ai's nano-banana-pro/edit model.

    Wraps Google's image generation/editing model (strong realism and
    typography) behind the BaseGenerator contract: resolves input artifacts,
    submits an async fal job, relays progress, and stores the resulting images.
    """

    name = "fal-nano-banana-pro-edit"
    artifact_type = "image"
    description = (
        "Fal: nano-banana-pro edit - Google's state-of-the-art image editing "
        "with excellent realism and typography"
    )

    def get_input_schema(self) -> type[NanoBananaProEditInput]:
        """Return the pydantic model describing this generator's inputs."""
        return NanoBananaProEditInput

    async def generate(
        self, inputs: NanoBananaProEditInput, context: GeneratorExecutionContext
    ) -> GeneratorResult:
        """Edit images via fal.ai nano-banana-pro/edit and store the outputs.

        Raises:
            ValueError: if FAL_KEY is unset, or the API returns no/invalid images.
            ImportError: if the fal-client SDK is not installed.
        """
        # fal-client reads its credentials from the FAL_KEY environment variable.
        if not os.getenv("FAL_KEY"):
            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")

        try:
            import fal_client
        except ImportError as e:
            raise ImportError(
                "fal.ai SDK is required for FalNanoBananaProEditGenerator. "
                "Install with: pip install weirdfingers-boards[generators-fal]"
            ) from e

        # Our artifact storage_urls may be localhost or private-bucket URLs
        # that fal.ai cannot fetch, so sources are first copied to Fal's
        # temporary public storage.
        from ..utils import upload_artifacts_to_fal

        source_urls = await upload_artifacts_to_fal(inputs.image_sources, context)

        # Request payload for the fal.ai endpoint.
        payload = {
            "prompt": inputs.prompt,
            "image_urls": source_urls,
            "num_images": inputs.num_images,
            "aspect_ratio": inputs.aspect_ratio,
            "resolution": inputs.resolution,
            "output_format": inputs.output_format,
            "sync_mode": inputs.sync_mode,
            "enable_web_search": inputs.enable_web_search,
            "limit_generations": inputs.limit_generations,
        }

        job = await fal_client.submit_async(
            "fal-ai/nano-banana-pro/edit",
            arguments=payload,
        )

        # Record fal's request id so the job can be tracked externally.
        await context.set_external_job_id(job.request_id)

        from .....progress.models import ProgressUpdate

        def _join_logs(raw):
            # Collapse a fal log payload (list or scalar) into one message string.
            if isinstance(raw, list):
                return " | ".join(str(entry) for entry in raw if entry)
            return str(raw)

        seen = 0
        async for event in job.iter_events(with_logs=True):
            seen += 1
            # Surface only every third event to give feedback without spamming.
            if seen % 3 != 0:
                continue
            raw_logs = getattr(event, "logs", None)
            if not raw_logs:
                continue
            text = _join_logs(raw_logs)
            if text:
                await context.publish_progress(
                    ProgressUpdate(
                        job_id=job.request_id,
                        status="processing",
                        progress=50.0,  # Approximate mid-point progress
                        phase="processing",
                        message=text,
                    )
                )

        result = await job.get()

        # Expected response shape:
        # {"images": [{"url": ..., "width": ..., "height": ...}, ...],
        #  "description": "..."}
        images = result.get("images", [])
        if not images:
            raise ValueError("No images returned from fal.ai API")

        # Persist each returned image, keyed by its position in the batch.
        artifacts = []
        for idx, item in enumerate(images):
            image_url = item.get("url")
            if not image_url:
                raise ValueError(f"Image {idx} missing URL in fal.ai response")
            artifacts.append(
                await context.store_image_result(
                    storage_url=image_url,
                    format=inputs.output_format,
                    # Fall back to 1024 when fal omits dimensions.
                    width=item.get("width", 1024),
                    height=item.get("height", 1024),
                    output_index=idx,
                )
            )

        return GeneratorResult(outputs=artifacts)

    async def estimate_cost(self, inputs: NanoBananaProEditInput) -> float:
        """Estimate cost for nano-banana-pro edit generation.

        Uses the same per-image rate as nano-banana-pro text-to-image
        ($0.039 per image), scaled by the requested batch size.
        """
        return inputs.num_images * 0.039
@@ -0,0 +1,249 @@
1
+ """
2
+ fal.ai qwen-image text-to-image generator.
3
+
4
+ Qwen-Image is an advanced image generation model with exceptional text rendering
5
+ and precise editing capabilities. Based on Fal AI's fal-ai/qwen-image model.
6
+
7
+ See: https://fal.ai/models/fal-ai/qwen-image
8
+ """
9
+
10
+ import os
11
+ from typing import Literal
12
+
13
+ from pydantic import BaseModel, Field
14
+
15
+ from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
16
+
17
+
18
class LoraConfig(BaseModel):
    """LoRA configuration for model fine-tuning."""

    # Location of the LoRA weights; may be a path or a URL per the description.
    path: str = Field(description="Path or URL to LoRA weights")
    # Blend strength of the LoRA; 1.0 applies it at full nominal strength.
    scale: float = Field(
        default=1.0,
        ge=0.0,
        le=4.0,
        description="Scale factor for LoRA influence (0-4)",
    )
28
+
29
+
30
class CustomImageSize(BaseModel):
    """Custom image dimensions."""

    # NOTE(review): no positivity/upper-bound constraints here — invalid sizes
    # are rejected by the fal API rather than locally; confirm if tightening is desired.
    width: int = Field(description="Image width in pixels")
    height: int = Field(description="Image height in pixels")
35
+
36
+
37
class QwenImageInput(BaseModel):
    """Input schema for qwen-image generation.

    Qwen-Image supports advanced text rendering and precise image editing capabilities.
    """

    prompt: str = Field(description="Text prompt for image generation")
    num_images: int = Field(
        default=1,
        ge=1,
        le=4,
        description="Number of images to generate (1-4)",
    )
    num_inference_steps: int = Field(
        default=30,
        ge=2,
        le=250,
        description="Number of inference steps (more steps = higher quality but slower)",
    )
    # Either a named preset string or explicit CustomImageSize dimensions;
    # the generator serializes the latter as {"width", "height"} for the API.
    image_size: (
        Literal[
            "square_hd",
            "square",
            "portrait_4_3",
            "portrait_16_9",
            "landscape_4_3",
            "landscape_16_9",
        ]
        | CustomImageSize
    ) = Field(
        default="landscape_4_3",
        description="Image aspect ratio preset or custom dimensions",
    )
    output_format: Literal["jpeg", "png"] = Field(
        default="png",
        description="Output image format",
    )
    guidance_scale: float = Field(
        default=2.5,
        ge=0.0,
        le=20.0,
        description="Guidance scale for prompt adherence (0-20)",
    )
    # Omitted from the API payload when None (the generator only sends it if set).
    seed: int | None = Field(
        default=None,
        description="Random seed for reproducibility (optional)",
    )
    # Default is a single space, not the empty string — presumably matching
    # the fal endpoint's own default; verify before changing.
    negative_prompt: str = Field(
        default=" ",
        description="Negative prompt to specify unwanted elements",
    )
    acceleration: Literal["none", "regular", "high"] = Field(
        default="none",
        description="Acceleration level for faster generation",
    )
    enable_safety_checker: bool = Field(
        default=True,
        description="Enable safety checker to filter NSFW content",
    )
    use_turbo: bool = Field(
        default=False,
        description="Enable turbo mode for faster generation (10 steps, CFG=1.2)",
    )
    sync_mode: bool = Field(
        default=False,
        description="Use synchronous mode (wait for completion)",
    )
    # Mutable default is safe here: pydantic deep-copies Field defaults per instance.
    loras: list[LoraConfig] = Field(
        default=[],
        max_length=3,
        description="LoRA configurations (up to 3 can be merged)",
    )
109
+
110
+
111
class FalQwenImageGenerator(BaseGenerator):
    """Qwen-Image generator using fal.ai.

    Advanced text-to-image generation with exceptional text rendering and
    editing capabilities. Submits an async job to fal-ai/qwen-image, relays
    sampled progress events, and stores each returned image as an artifact.
    """

    name = "fal-qwen-image"
    artifact_type = "image"
    description = "Fal: Qwen-Image - advanced text-to-image with exceptional text rendering"

    def get_input_schema(self) -> type[QwenImageInput]:
        """Return the pydantic model describing this generator's inputs."""
        return QwenImageInput

    async def generate(
        self, inputs: QwenImageInput, context: GeneratorExecutionContext
    ) -> GeneratorResult:
        """Generate images using the fal.ai qwen-image model.

        Raises:
            ValueError: if FAL_KEY is unset, or the API returns no/invalid images.
            ImportError: if the fal-client SDK is not installed.
        """
        # fal-client reads its credentials from the FAL_KEY environment variable.
        if not os.getenv("FAL_KEY"):
            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")

        try:
            import fal_client
        except ImportError as e:
            raise ImportError(
                "fal.ai SDK is required for FalQwenImageGenerator. "
                "Install with: pip install weirdfingers-boards[generators-fal]"
            ) from e

        # Base payload; optional keys (image_size, seed, loras) are added below.
        arguments = {
            "prompt": inputs.prompt,
            "num_images": inputs.num_images,
            "num_inference_steps": inputs.num_inference_steps,
            "output_format": inputs.output_format,
            "guidance_scale": inputs.guidance_scale,
            "negative_prompt": inputs.negative_prompt,
            "acceleration": inputs.acceleration,
            "enable_safety_checker": inputs.enable_safety_checker,
            "use_turbo": inputs.use_turbo,
            "sync_mode": inputs.sync_mode,
        }

        # image_size is either a named preset (string) or explicit dimensions.
        if isinstance(inputs.image_size, str):
            arguments["image_size"] = inputs.image_size
        else:
            arguments["image_size"] = {
                "width": inputs.image_size.width,
                "height": inputs.image_size.height,
            }

        # Only send a seed when the caller asked for reproducibility.
        if inputs.seed is not None:
            arguments["seed"] = inputs.seed

        if inputs.loras:
            arguments["loras"] = [{"path": lora.path, "scale": lora.scale} for lora in inputs.loras]

        handler = await fal_client.submit_async(
            "fal-ai/qwen-image",
            arguments=arguments,
        )

        # Record fal's request id so the job can be tracked externally.
        await context.set_external_job_id(handler.request_id)

        from .....progress.models import ProgressUpdate

        event_count = 0
        async for event in handler.iter_events(with_logs=True):
            event_count += 1

            # Surface only every 3rd event to provide feedback without spam.
            if event_count % 3 == 0:
                logs = getattr(event, "logs", None)
                if logs:
                    # Collapse a list of log entries into a single message.
                    if isinstance(logs, list):
                        message = " | ".join(str(log) for log in logs if log)
                    else:
                        message = str(logs)

                    if message:
                        await context.publish_progress(
                            ProgressUpdate(
                                job_id=handler.request_id,
                                status="processing",
                                progress=50.0,  # Approximate mid-point progress
                                phase="processing",
                                message=message,
                            )
                        )

        result = await handler.get()

        # Expected response shape:
        # {"images": [{"url": "...", "width": ..., "height": ...}, ...]}
        images = result.get("images", [])
        if not images:
            raise ValueError("No images returned from fal.ai API")

        # Persist each returned image, keyed by its position in the batch.
        artifacts = []
        for idx, image_data in enumerate(images):
            image_url = image_data.get("url")
            if not image_url:
                raise ValueError(f"Image {idx} missing URL in fal.ai response")

            # Fix: previously .get("width") / .get("height") with no default
            # forwarded None to store_image_result when fal omitted the
            # dimensions. Default to 1024 instead, matching the other fal
            # image generators in this package.
            width = image_data.get("width", 1024)
            height = image_data.get("height", 1024)

            artifact = await context.store_image_result(
                storage_url=image_url,
                format=inputs.output_format,
                width=width,
                height=height,
                output_index=idx,
            )
            artifacts.append(artifact)

        return GeneratorResult(outputs=artifacts)

    async def estimate_cost(self, inputs: QwenImageInput) -> float:
        """Estimate cost for qwen-image generation.

        Qwen-image pricing is approximately $0.05 per image based on similar
        high-quality text-to-image models on fal.ai.
        """
        return 0.05 * inputs.num_images  # $0.05 per image, scaled by batch size