@weirdfingers/baseboards 0.5.3 → 0.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/package.json +1 -1
- package/templates/api/alembic/env.py +9 -1
- package/templates/api/alembic/versions/20250101_000000_initial_schema.py +107 -49
- package/templates/api/alembic/versions/20251022_174729_remove_provider_name_from_generations.py +7 -3
- package/templates/api/alembic/versions/20251023_165852_switch_to_declarative_base_and_mapping.py +57 -1
- package/templates/api/alembic/versions/20251202_000000_add_artifact_lineage.py +134 -0
- package/templates/api/alembic/versions/2025925_62735_add_seed_data_for_default_tenant.py +8 -5
- package/templates/api/config/generators.yaml +111 -0
- package/templates/api/src/boards/__init__.py +1 -1
- package/templates/api/src/boards/api/app.py +2 -1
- package/templates/api/src/boards/api/endpoints/tenant_registration.py +1 -1
- package/templates/api/src/boards/api/endpoints/uploads.py +150 -0
- package/templates/api/src/boards/auth/factory.py +1 -1
- package/templates/api/src/boards/dbmodels/__init__.py +8 -22
- package/templates/api/src/boards/generators/artifact_resolution.py +45 -12
- package/templates/api/src/boards/generators/implementations/fal/audio/__init__.py +16 -1
- package/templates/api/src/boards/generators/implementations/fal/audio/beatoven_music_generation.py +171 -0
- package/templates/api/src/boards/generators/implementations/fal/audio/beatoven_sound_effect_generation.py +167 -0
- package/templates/api/src/boards/generators/implementations/fal/audio/elevenlabs_sound_effects_v2.py +194 -0
- package/templates/api/src/boards/generators/implementations/fal/audio/elevenlabs_tts_eleven_v3.py +209 -0
- package/templates/api/src/boards/generators/implementations/fal/audio/fal_elevenlabs_tts_turbo_v2_5.py +206 -0
- package/templates/api/src/boards/generators/implementations/fal/audio/fal_minimax_speech_26_hd.py +237 -0
- package/templates/api/src/boards/generators/implementations/fal/audio/minimax_speech_2_6_turbo.py +1 -1
- package/templates/api/src/boards/generators/implementations/fal/image/__init__.py +30 -0
- package/templates/api/src/boards/generators/implementations/fal/image/clarity_upscaler.py +220 -0
- package/templates/api/src/boards/generators/implementations/fal/image/crystal_upscaler.py +173 -0
- package/templates/api/src/boards/generators/implementations/fal/image/fal_ideogram_character.py +227 -0
- package/templates/api/src/boards/generators/implementations/fal/image/flux_2.py +203 -0
- package/templates/api/src/boards/generators/implementations/fal/image/flux_2_edit.py +230 -0
- package/templates/api/src/boards/generators/implementations/fal/image/flux_2_pro.py +204 -0
- package/templates/api/src/boards/generators/implementations/fal/image/flux_2_pro_edit.py +221 -0
- package/templates/api/src/boards/generators/implementations/fal/image/gemini_25_flash_image.py +177 -0
- package/templates/api/src/boards/generators/implementations/fal/image/gpt_image_1_edit_image.py +182 -0
- package/templates/api/src/boards/generators/implementations/fal/image/gpt_image_1_mini.py +167 -0
- package/templates/api/src/boards/generators/implementations/fal/image/ideogram_character_edit.py +299 -0
- package/templates/api/src/boards/generators/implementations/fal/image/ideogram_v2.py +190 -0
- package/templates/api/src/boards/generators/implementations/fal/image/nano_banana_pro_edit.py +226 -0
- package/templates/api/src/boards/generators/implementations/fal/image/qwen_image.py +249 -0
- package/templates/api/src/boards/generators/implementations/fal/image/qwen_image_edit.py +244 -0
- package/templates/api/src/boards/generators/implementations/fal/video/__init__.py +42 -0
- package/templates/api/src/boards/generators/implementations/fal/video/bytedance_seedance_v1_pro_text_to_video.py +209 -0
- package/templates/api/src/boards/generators/implementations/fal/video/creatify_lipsync.py +161 -0
- package/templates/api/src/boards/generators/implementations/fal/video/fal_bytedance_seedance_v1_pro_image_to_video.py +222 -0
- package/templates/api/src/boards/generators/implementations/fal/video/fal_minimax_hailuo_02_standard_text_to_video.py +152 -0
- package/templates/api/src/boards/generators/implementations/fal/video/fal_pixverse_lipsync.py +197 -0
- package/templates/api/src/boards/generators/implementations/fal/video/fal_sora_2_text_to_video.py +173 -0
- package/templates/api/src/boards/generators/implementations/fal/video/infinitalk.py +221 -0
- package/templates/api/src/boards/generators/implementations/fal/video/kling_video_v2_5_turbo_pro_image_to_video.py +175 -0
- package/templates/api/src/boards/generators/implementations/fal/video/minimax_hailuo_2_3_pro_image_to_video.py +153 -0
- package/templates/api/src/boards/generators/implementations/fal/video/sora2_image_to_video.py +172 -0
- package/templates/api/src/boards/generators/implementations/fal/video/sora_2_image_to_video_pro.py +175 -0
- package/templates/api/src/boards/generators/implementations/fal/video/sora_2_text_to_video_pro.py +163 -0
- package/templates/api/src/boards/generators/implementations/fal/video/sync_lipsync_v2_pro.py +155 -0
- package/templates/api/src/boards/generators/implementations/fal/video/veed_lipsync.py +174 -0
- package/templates/api/src/boards/generators/implementations/fal/video/veo3.py +194 -0
- package/templates/api/src/boards/generators/implementations/fal/video/veo31_first_last_frame_to_video.py +1 -1
- package/templates/api/src/boards/generators/implementations/fal/video/wan_pro_image_to_video.py +158 -0
- package/templates/api/src/boards/graphql/access_control.py +1 -1
- package/templates/api/src/boards/graphql/mutations/root.py +16 -4
- package/templates/api/src/boards/graphql/resolvers/board.py +0 -2
- package/templates/api/src/boards/graphql/resolvers/generation.py +10 -233
- package/templates/api/src/boards/graphql/resolvers/lineage.py +381 -0
- package/templates/api/src/boards/graphql/resolvers/upload.py +463 -0
- package/templates/api/src/boards/graphql/types/generation.py +62 -26
- package/templates/api/src/boards/middleware.py +1 -1
- package/templates/api/src/boards/storage/factory.py +2 -2
- package/templates/api/src/boards/tenant_isolation.py +9 -9
- package/templates/api/src/boards/workers/actors.py +10 -1
- package/templates/web/package.json +1 -1
- package/templates/web/src/app/boards/[boardId]/page.tsx +14 -5
- package/templates/web/src/app/lineage/[generationId]/page.tsx +233 -0
- package/templates/web/src/components/boards/ArtifactPreview.tsx +20 -1
- package/templates/web/src/components/boards/UploadArtifact.tsx +253 -0
package/templates/api/src/boards/generators/implementations/fal/image/gpt_image_1_mini.py
ADDED
@@ -0,0 +1,167 @@
+"""
+fal.ai GPT Image 1 Mini text-to-image generator.
+
+Generate images using OpenAI's GPT-5 language capabilities combined with GPT Image 1 Mini
+for efficient image generation.
+
+Based on Fal AI's fal-ai/gpt-image-1-mini model.
+See: https://fal.ai/models/fal-ai/gpt-image-1-mini
+"""
+
+import os
+from typing import Literal
+
+from pydantic import BaseModel, Field
+
+from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+
+
+class GptImage1MiniInput(BaseModel):
+    """Input schema for GPT Image 1 Mini.
+
+    Artifact fields are automatically detected via type introspection
+    and resolved from generation IDs to artifact objects.
+    """
+
+    prompt: str = Field(
+        description="Image generation instruction",
+        min_length=3,
+        max_length=5000,
+    )
+    num_images: int = Field(
+        default=1,
+        ge=1,
+        le=4,
+        description="Number of images to generate",
+    )
+    output_format: Literal["jpeg", "png", "webp"] = Field(
+        default="jpeg",
+        description="Output image format",
+    )
+    sync_mode: bool = Field(
+        default=False,
+        description=(
+            "If True, the media will be returned as a data URI and the output "
+            "data won't be available in the request history"
+        ),
+    )
+
+
+class FalGptImage1MiniGenerator(BaseGenerator):
+    """GPT Image 1 Mini text-to-image generator using fal.ai."""
+
+    name = "fal-gpt-image-1-mini"
+    artifact_type = "image"
+    description = "Fal: GPT Image 1 Mini - Efficient text-to-image generation with GPT-5"
+
+    def get_input_schema(self) -> type[GptImage1MiniInput]:
+        return GptImage1MiniInput
+
+    async def generate(
+        self, inputs: GptImage1MiniInput, context: GeneratorExecutionContext
+    ) -> GeneratorResult:
+        """Generate images using fal.ai gpt-image-1-mini model."""
+        # Check for API key (fal-client uses FAL_KEY environment variable)
+        if not os.getenv("FAL_KEY"):
+            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
+
+        # Import fal_client
+        try:
+            import fal_client
+        except ImportError as e:
+            raise ImportError(
+                "fal.ai SDK is required for FalGptImage1MiniGenerator. "
+                "Install with: pip install weirdfingers-boards[generators-fal]"
+            ) from e
+
+        # Prepare arguments for fal.ai API
+        arguments = {
+            "prompt": inputs.prompt,
+            "num_images": inputs.num_images,
+            "output_format": inputs.output_format,
+            "sync_mode": inputs.sync_mode,
+        }
+
+        # Submit async job and get handler
+        handler = await fal_client.submit_async(
+            "fal-ai/gpt-image-1-mini",
+            arguments=arguments,
+        )
+
+        # Store the external job ID for tracking
+        await context.set_external_job_id(handler.request_id)
+
+        # Stream progress updates (sample every 3rd event to avoid spam)
+        from .....progress.models import ProgressUpdate
+
+        event_count = 0
+        async for event in handler.iter_events(with_logs=True):
+            event_count += 1
+
+            # Process every 3rd event to provide feedback without overwhelming
+            if event_count % 3 == 0:
+                # Extract logs if available
+                logs = getattr(event, "logs", None)
+                if logs:
+                    # Join log entries into a single message
+                    if isinstance(logs, list):
+                        message = " | ".join(str(log) for log in logs if log)
+                    else:
+                        message = str(logs)
+
+                    if message:
+                        await context.publish_progress(
+                            ProgressUpdate(
+                                job_id=handler.request_id,
+                                status="processing",
+                                progress=50.0,  # Approximate mid-point progress
+                                phase="processing",
+                                message=message,
+                            )
+                        )
+
+        # Get final result
+        result = await handler.get()
+
+        # Extract image URLs and description from result
+        # fal.ai returns: {
+        #     "images": [{"url": "...", "content_type": "...", ...}, ...],
+        #     "description": "Text description"
+        # }
+        images = result.get("images", [])
+
+        if not images:
+            raise ValueError("No images returned from fal.ai API")
+
+        # Store each image using output_index
+        artifacts = []
+        for idx, image_data in enumerate(images):
+            image_url = image_data.get("url")
+            # Extract dimensions if available, otherwise use sensible defaults
+            # Use 'or' to handle explicit None values from API
+            width = image_data.get("width") or 1024
+            height = image_data.get("height") or 1024
+
+            if not image_url:
+                raise ValueError(f"Image {idx} missing URL in fal.ai response")
+
+            # Store with appropriate output_index
+            artifact = await context.store_image_result(
+                storage_url=image_url,
+                format=inputs.output_format,
+                width=width,
+                height=height,
+                output_index=idx,
+            )
+            artifacts.append(artifact)
+
+        return GeneratorResult(outputs=artifacts)
+
+    async def estimate_cost(self, inputs: GptImage1MiniInput) -> float:
+        """Estimate cost for GPT Image 1 Mini generation.
+
+        Using estimated cost per image (pricing not documented).
+        """
+        # Estimated cost per image
+        per_image_cost = 0.01
+        return per_image_cost * inputs.num_images
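Note (illustrative, not part of the published diff): the Pydantic schema above rejects out-of-range inputs before any fal.ai call is made. A minimal validation sketch, assuming the template's src/ layout makes the module importable as boards.generators.implementations.fal.image.gpt_image_1_mini:

    # Hypothetical usage; the import path is assumed from the template's src/ layout.
    from pydantic import ValidationError
    from boards.generators.implementations.fal.image.gpt_image_1_mini import GptImage1MiniInput

    GptImage1MiniInput(prompt="A watercolor fox over a misty valley", num_images=2)  # accepted
    try:
        GptImage1MiniInput(prompt="hi", num_images=9)  # prompt under min_length=3, num_images over le=4
    except ValidationError as exc:
        print(exc.error_count())  # 2
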
package/templates/api/src/boards/generators/implementations/fal/image/ideogram_character_edit.py
ADDED
@@ -0,0 +1,299 @@
+"""
+fal.ai Ideogram V3 Character Edit generator.
+
+Modify consistent characters while preserving their core identity.
+Edit poses, expressions, or clothing without losing recognizable character features.
+
+Based on Fal AI's fal-ai/ideogram/character/edit model.
+See: https://fal.ai/models/fal-ai/ideogram/character/edit
+"""
+
+import os
+from typing import Literal
+
+from pydantic import BaseModel, Field
+
+from ....artifacts import ImageArtifact
+from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+
+
+class RGBColor(BaseModel):
+    """RGB color definition."""
+
+    r: int = Field(default=0, ge=0, le=255, description="Red color value")
+    g: int = Field(default=0, ge=0, le=255, description="Green color value")
+    b: int = Field(default=0, ge=0, le=255, description="Blue color value")
+
+
+class ColorPaletteMember(BaseModel):
+    """Color palette member with RGB color and weight."""
+
+    rgb: RGBColor = Field(description="RGB color definition")
+    color_weight: float = Field(
+        default=0.5,
+        ge=0.05,
+        le=1.0,
+        description="The weight of the color in the color palette",
+    )
+
+
+class ColorPalette(BaseModel):
+    """Color palette for generation.
+
+    Can be specified via presets or explicit hexadecimal representations.
+    """
+
+    name: (
+        Literal[
+            "EMBER",
+            "FRESH",
+            "JUNGLE",
+            "MAGIC",
+            "MELON",
+            "MOSAIC",
+            "PASTEL",
+            "ULTRAMARINE",
+        ]
+        | None
+    ) = Field(default=None, description="Preset color palette name")
+    members: list[ColorPaletteMember] | None = Field(
+        default=None,
+        description="Explicit color palette members with RGB values and weights",
+    )
+
+
+class IdeogramCharacterEditInput(BaseModel):
+    """Input schema for Ideogram Character Edit.
+
+    Artifact fields are automatically detected via type introspection
+    and resolved from generation IDs to artifact objects.
+    """
+
+    prompt: str = Field(description="The prompt to fill the masked part of the image")
+    image_url: ImageArtifact = Field(
+        description=(
+            "The image to generate from. MUST have the exact same dimensions as the mask image"
+        )
+    )
+    mask_url: ImageArtifact = Field(
+        description=(
+            "The mask to inpaint the image. MUST have the exact same dimensions as the input image"
+        )
+    )
+    reference_image_urls: list[ImageArtifact] = Field(
+        description=(
+            "A set of images to use as character references. "
+            "Currently only 1 image is supported, rest will be ignored "
+            "(maximum total size 10MB)"
+        ),
+        min_length=1,
+    )
+    style: Literal["AUTO", "REALISTIC", "FICTION"] = Field(
+        default="AUTO",
+        description="The style type to generate with. Cannot be used with style_codes",
+    )
+    expand_prompt: bool = Field(
+        default=True,
+        description="Determine if MagicPrompt should be used in generating the request or not",
+    )
+    rendering_speed: Literal["TURBO", "BALANCED", "QUALITY"] = Field(
+        default="BALANCED",
+        description="The rendering speed to use",
+    )
+    reference_mask_urls: list[ImageArtifact] | None = Field(
+        default=None,
+        description=(
+            "A set of masks to apply to character references. "
+            "Currently only 1 mask is supported (maximum total size 10MB)"
+        ),
+    )
+    image_urls: list[ImageArtifact] | None = Field(
+        default=None,
+        description="A set of images to use as style references (maximum total size 10MB)",
+    )
+    num_images: int = Field(
+        default=1,
+        ge=1,
+        le=8,
+        description="Number of images to generate",
+    )
+    style_codes: list[str] | None = Field(
+        default=None,
+        description="A list of 8 character hexadecimal codes representing the style of the image",
+    )
+    color_palette: ColorPalette | None = Field(
+        default=None,
+        description="A color palette for generation",
+    )
+    sync_mode: bool = Field(
+        default=False,
+        description=(
+            "If True, the media will be returned as a data URI "
+            "and output data won't be available in request history"
+        ),
+    )
+    seed: int | None = Field(
+        default=None,
+        description="Seed for the random number generator",
+    )
+
+
+class FalIdeogramCharacterEditGenerator(BaseGenerator):
+    """Generator for Ideogram V3 Character Edit - modify consistent characters."""
+
+    name = "fal-ideogram-character-edit"
+    artifact_type = "image"
+    description = (
+        "Fal: Ideogram V3 Character Edit - "
+        "Modify consistent characters while preserving their core identity"
+    )
+
+    def get_input_schema(self) -> type[IdeogramCharacterEditInput]:
+        """Return the input schema for this generator."""
+        return IdeogramCharacterEditInput
+
+    async def generate(
+        self, inputs: IdeogramCharacterEditInput, context: GeneratorExecutionContext
+    ) -> GeneratorResult:
+        """Generate edited character images using fal.ai ideogram/character/edit."""
+        # Check for API key
+        if not os.getenv("FAL_KEY"):
+            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
+
+        # Import fal_client
+        try:
+            import fal_client
+        except ImportError as e:
+            raise ImportError(
+                "fal.ai SDK is required for FalIdeogramCharacterEditGenerator. "
+                "Install with: pip install weirdfingers-boards[generators-fal]"
+            ) from e
+
+        # Upload image artifacts to Fal's public storage
+        from ..utils import upload_artifacts_to_fal
+
+        # Upload required artifacts
+        image_url = await upload_artifacts_to_fal([inputs.image_url], context)
+        mask_url = await upload_artifacts_to_fal([inputs.mask_url], context)
+        reference_image_urls = await upload_artifacts_to_fal(inputs.reference_image_urls, context)
+
+        # Upload optional artifacts
+        reference_mask_urls = None
+        if inputs.reference_mask_urls:
+            reference_mask_urls = await upload_artifacts_to_fal(inputs.reference_mask_urls, context)
+
+        style_reference_urls = None
+        if inputs.image_urls:
+            style_reference_urls = await upload_artifacts_to_fal(inputs.image_urls, context)
+
+        # Prepare arguments for fal.ai API
+        arguments = {
+            "prompt": inputs.prompt,
+            "image_url": image_url[0],  # Single URL
+            "mask_url": mask_url[0],  # Single URL
+            "reference_image_urls": reference_image_urls,  # Array
+            "style": inputs.style,
+            "expand_prompt": inputs.expand_prompt,
+            "rendering_speed": inputs.rendering_speed,
+            "num_images": inputs.num_images,
+            "sync_mode": inputs.sync_mode,
+        }
+
+        # Add optional parameters
+        if reference_mask_urls:
+            arguments["reference_mask_urls"] = reference_mask_urls
+
+        if style_reference_urls:
+            arguments["image_urls"] = style_reference_urls
+
+        if inputs.style_codes:
+            arguments["style_codes"] = inputs.style_codes
+
+        if inputs.color_palette:
+            # Convert Pydantic model to dict for API
+            arguments["color_palette"] = inputs.color_palette.model_dump(exclude_none=True)
+
+        if inputs.seed is not None:
+            arguments["seed"] = inputs.seed
+
+        # Submit async job
+        handler = await fal_client.submit_async(
+            "fal-ai/ideogram/character/edit",
+            arguments=arguments,
+        )
+
+        # Store external job ID
+        await context.set_external_job_id(handler.request_id)
+
+        # Stream progress updates
+        from .....progress.models import ProgressUpdate
+
+        event_count = 0
+        async for event in handler.iter_events(with_logs=True):
+            event_count += 1
+            # Sample every 3rd event to avoid spam
+            if event_count % 3 == 0:
+                # Extract logs if available
+                logs = getattr(event, "logs", None)
+                if logs:
+                    # Join log entries into a single message
+                    if isinstance(logs, list):
+                        message = " | ".join(str(log) for log in logs if log)
+                    else:
+                        message = str(logs)
+
+                    if message:
+                        await context.publish_progress(
+                            ProgressUpdate(
+                                job_id=handler.request_id,
+                                status="processing",
+                                progress=50.0,
+                                phase="processing",
+                                message=message,
+                            )
+                        )
+
+        # Get final result
+        result = await handler.get()
+
+        # Extract images from result
+        # API returns: {"images": [{"url": "...", ...}], "seed": 123}
+        images = result.get("images", [])
+
+        if not images:
+            raise ValueError("No images returned from fal.ai API")
+
+        # Store each image using output_index
+        artifacts = []
+        for idx, image_data in enumerate(images):
+            image_url = image_data.get("url")
+            if not image_url:
+                raise ValueError(f"Image {idx} missing URL in fal.ai response")
+
+            # Extract dimensions if available (Ideogram typically generates at fixed sizes)
+            width = image_data.get("width", 1024)
+            height = image_data.get("height", 1024)
+
+            # Determine format from content_type or default to webp
+            content_type = image_data.get("content_type", "image/webp")
+            format_str = content_type.split("/")[-1] if "/" in content_type else "webp"
+
+            artifact = await context.store_image_result(
+                storage_url=image_url,
+                format=format_str,
+                width=width,
+                height=height,
+                output_index=idx,
+            )
+            artifacts.append(artifact)
+
+        return GeneratorResult(outputs=artifacts)
+
+    async def estimate_cost(self, inputs: IdeogramCharacterEditInput) -> float:
+        """Estimate cost for this generation in USD.
+
+        Pricing information not available in documentation.
+        Using estimated cost of $0.05 per image.
+        """
+        cost_per_image = 0.05
+        return cost_per_image * inputs.num_images
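Note (illustrative, not part of the published diff): the nested palette models above serialize via model_dump(exclude_none=True), which is exactly how generate() builds the color_palette argument. A small sketch with hypothetical color values and the classes above in scope:

    # Hypothetical values; shows the dict shape generate() places in arguments["color_palette"].
    palette = ColorPalette(
        members=[
            ColorPaletteMember(rgb=RGBColor(r=255, g=87, b=51), color_weight=0.7),
            ColorPaletteMember(rgb=RGBColor(r=30, g=30, b=120)),  # color_weight defaults to 0.5
        ]
    )
    payload = palette.model_dump(exclude_none=True)  # the unset "name" preset is dropped
    # payload == {"members": [{"rgb": {"r": 255, "g": 87, "b": 51}, "color_weight": 0.7},
    #                         {"rgb": {"r": 30, "g": 30, "b": 120}, "color_weight": 0.5}]}
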
package/templates/api/src/boards/generators/implementations/fal/image/ideogram_v2.py
ADDED
@@ -0,0 +1,190 @@
+"""
+Generate high-quality images, posters, and logos with exceptional typography handling.
+
+Based on Fal AI's fal-ai/ideogram/v2 model.
+See: https://fal.ai/models/fal-ai/ideogram/v2
+"""
+
+import os
+from typing import Literal
+
+from pydantic import BaseModel, Field
+
+from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+
+
+class IdeogramV2Input(BaseModel):
+    """Input schema for Ideogram V2 image generation.
+
+    Ideogram V2 is optimized for generating high-quality images with exceptional
+    typography handling, making it ideal for posters, logos, and creative content.
+    """
+
+    prompt: str = Field(description="Text description for image generation")
+    aspect_ratio: Literal[
+        "1:1",
+        "16:9",
+        "9:16",
+        "4:3",
+        "3:4",
+        "10:16",
+        "16:10",
+        "1:3",
+        "3:1",
+        "3:2",
+        "2:3",
+    ] = Field(
+        default="1:1",
+        description="Aspect ratio for the generated image",
+    )
+    style: Literal["auto", "general", "realistic", "design", "render_3D", "anime"] = Field(
+        default="auto",
+        description="Visual style for the generated image",
+    )
+    expand_prompt: bool = Field(
+        default=True,
+        description="Enable MagicPrompt functionality to enhance the prompt",
+    )
+    seed: int | None = Field(
+        default=None,
+        description="Random seed for reproducibility (optional)",
+    )
+    negative_prompt: str = Field(
+        default="",
+        description="Elements to exclude from the generated image",
+    )
+    sync_mode: bool = Field(
+        default=False,
+        description="Use synchronous mode (returns data URI instead of storing in history)",
+    )
+
+
+class FalIdeogramV2Generator(BaseGenerator):
+    """Generator for high-quality images with exceptional typography using Ideogram V2."""
+
+    name = "fal-ideogram-v2"
+    artifact_type = "image"
+    description = (
+        "Fal: Ideogram V2 - high-quality images, posters, and logos with exceptional typography"
+    )
+
+    def get_input_schema(self) -> type[IdeogramV2Input]:
+        """Return the input schema for this generator."""
+        return IdeogramV2Input
+
+    async def generate(
+        self, inputs: IdeogramV2Input, context: GeneratorExecutionContext
+    ) -> GeneratorResult:
+        """Generate images using fal.ai Ideogram V2 model."""
+        # Check for API key (fal-client uses FAL_KEY environment variable)
+        if not os.getenv("FAL_KEY"):
+            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
+
+        # Import fal_client
+        try:
+            import fal_client
+        except ImportError as e:
+            raise ImportError(
+                "fal.ai SDK is required for FalIdeogramV2Generator. "
+                "Install with: pip install weirdfingers-boards[generators-fal]"
+            ) from e
+
+        # Prepare arguments for fal.ai API
+        arguments = {
+            "prompt": inputs.prompt,
+            "aspect_ratio": inputs.aspect_ratio,
+            "style": inputs.style,
+            "expand_prompt": inputs.expand_prompt,
+            "negative_prompt": inputs.negative_prompt,
+            "sync_mode": inputs.sync_mode,
+        }
+
+        # Add seed if provided
+        if inputs.seed is not None:
+            arguments["seed"] = inputs.seed
+
+        # Submit async job and get handler
+        handler = await fal_client.submit_async(
+            "fal-ai/ideogram/v2",
+            arguments=arguments,
+        )
+
+        # Store the external job ID for tracking
+        await context.set_external_job_id(handler.request_id)
+
+        # Stream progress updates (sample every 3rd event to avoid spam)
+        from .....progress.models import ProgressUpdate
+
+        event_count = 0
+        async for event in handler.iter_events(with_logs=True):
+            event_count += 1
+
+            # Process every 3rd event to provide feedback without overwhelming
+            if event_count % 3 == 0:
+                # Extract logs if available
+                logs = getattr(event, "logs", None)
+                if logs:
+                    # Join log entries into a single message
+                    if isinstance(logs, list):
+                        message = " | ".join(str(log) for log in logs if log)
+                    else:
+                        message = str(logs)
+
+                    if message:
+                        await context.publish_progress(
+                            ProgressUpdate(
+                                job_id=handler.request_id,
+                                status="processing",
+                                progress=50.0,  # Approximate mid-point progress
+                                phase="processing",
+                                message=message,
+                            )
+                        )
+
+        # Get final result
+        result = await handler.get()
+
+        # Extract image data from result
+        # fal.ai ideogram/v2 returns:
+        # {"images": [{"url": "...", "content_type": "...", ...}], "seed": ...}
+        images = result.get("images", [])
+        if not images:
+            raise ValueError("No images returned from fal.ai API")
+
+        # Store each image using output_index
+        artifacts = []
+        for idx, image_data in enumerate(images):
+            image_url = image_data.get("url")
+
+            if not image_url:
+                raise ValueError(f"Image {idx} missing URL in fal.ai response")
+
+            # Extract dimensions if available, use defaults otherwise
+            # Ideogram V2 doesn't return explicit width/height in the response schema,
+            # so we'll use reasonable defaults based on aspect ratio
+            width = image_data.get("width", 1024)
+            height = image_data.get("height", 1024)
+
+            # Determine format from content_type (e.g., "image/png" -> "png")
+            content_type = image_data.get("content_type", "image/png")
+            format = content_type.split("/")[-1] if "/" in content_type else "png"
+
+            # Store with appropriate output_index
+            artifact = await context.store_image_result(
+                storage_url=image_url,
+                format=format,
+                width=width,
+                height=height,
+                output_index=idx,
+            )
+            artifacts.append(artifact)
+
+        return GeneratorResult(outputs=artifacts)
+
+    async def estimate_cost(self, inputs: IdeogramV2Input) -> float:
+        """Estimate cost for Ideogram V2 generation.
+
+        Ideogram V2 pricing is approximately $0.04 per image generation.
+        Note: Actual pricing may vary. Check Fal AI documentation for current rates.
+        """
+        return 0.04  # $0.04 per image (estimate)
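Note (illustrative, not part of the published diff): IdeogramV2Input constrains aspect_ratio and style to the Literal values listed above, so unsupported ratios fail fast instead of reaching the fal.ai API. A minimal sketch with hypothetical prompts and the classes above in scope:

    # Hypothetical values; demonstrates the schema defaults and Literal validation only.
    from pydantic import ValidationError

    inputs = IdeogramV2Input(prompt="Retro travel poster for Mars")
    print(inputs.aspect_ratio, inputs.style, inputs.expand_prompt)  # 1:1 auto True

    try:
        IdeogramV2Input(prompt="poster", aspect_ratio="21:9")  # not in the Literal list above
    except ValidationError:
        print("unsupported aspect ratio rejected before any fal.ai call")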