@weirdfingers/baseboards 0.9.6 → 0.9.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +560 -469
- package/dist/index.js.map +1 -1
- package/package.json +2 -5
- package/templates/README.md +0 -122
- package/templates/api/.env.example +0 -65
- package/templates/api/ARTIFACT_RESOLUTION_GUIDE.md +0 -148
- package/templates/api/Dockerfile +0 -32
- package/templates/api/README.md +0 -264
- package/templates/api/alembic/env.py +0 -114
- package/templates/api/alembic/script.py.mako +0 -28
- package/templates/api/alembic/versions/20250101_000000_initial_schema.py +0 -506
- package/templates/api/alembic/versions/20251022_174729_remove_provider_name_from_generations.py +0 -75
- package/templates/api/alembic/versions/20251023_165852_switch_to_declarative_base_and_mapping.py +0 -467
- package/templates/api/alembic/versions/20251202_000000_add_artifact_lineage.py +0 -134
- package/templates/api/alembic/versions/2025925_62735_add_seed_data_for_default_tenant.py +0 -88
- package/templates/api/alembic.ini +0 -36
- package/templates/api/config/generators.yaml +0 -237
- package/templates/api/config/storage_config.yaml +0 -26
- package/templates/api/docs/ADDING_GENERATORS.md +0 -409
- package/templates/api/docs/GENERATORS_API.md +0 -502
- package/templates/api/docs/MIGRATIONS.md +0 -472
- package/templates/api/docs/TESTING_LIVE_APIS.md +0 -417
- package/templates/api/docs/storage_providers.md +0 -337
- package/templates/api/pyproject.toml +0 -205
- package/templates/api/src/boards/__init__.py +0 -10
- package/templates/api/src/boards/api/app.py +0 -172
- package/templates/api/src/boards/api/auth.py +0 -75
- package/templates/api/src/boards/api/endpoints/__init__.py +0 -3
- package/templates/api/src/boards/api/endpoints/jobs.py +0 -76
- package/templates/api/src/boards/api/endpoints/setup.py +0 -505
- package/templates/api/src/boards/api/endpoints/sse.py +0 -129
- package/templates/api/src/boards/api/endpoints/storage.py +0 -155
- package/templates/api/src/boards/api/endpoints/tenant_registration.py +0 -296
- package/templates/api/src/boards/api/endpoints/uploads.py +0 -149
- package/templates/api/src/boards/api/endpoints/webhooks.py +0 -13
- package/templates/api/src/boards/auth/__init__.py +0 -15
- package/templates/api/src/boards/auth/adapters/__init__.py +0 -27
- package/templates/api/src/boards/auth/adapters/auth0.py +0 -220
- package/templates/api/src/boards/auth/adapters/base.py +0 -73
- package/templates/api/src/boards/auth/adapters/clerk.py +0 -172
- package/templates/api/src/boards/auth/adapters/jwt.py +0 -122
- package/templates/api/src/boards/auth/adapters/none.py +0 -102
- package/templates/api/src/boards/auth/adapters/oidc.py +0 -284
- package/templates/api/src/boards/auth/adapters/supabase.py +0 -110
- package/templates/api/src/boards/auth/context.py +0 -35
- package/templates/api/src/boards/auth/factory.py +0 -129
- package/templates/api/src/boards/auth/middleware.py +0 -221
- package/templates/api/src/boards/auth/provisioning.py +0 -129
- package/templates/api/src/boards/auth/tenant_extraction.py +0 -278
- package/templates/api/src/boards/cli.py +0 -354
- package/templates/api/src/boards/config.py +0 -131
- package/templates/api/src/boards/database/__init__.py +0 -7
- package/templates/api/src/boards/database/cli.py +0 -110
- package/templates/api/src/boards/database/connection.py +0 -292
- package/templates/api/src/boards/database/models.py +0 -19
- package/templates/api/src/boards/database/seed_data.py +0 -182
- package/templates/api/src/boards/dbmodels/__init__.py +0 -441
- package/templates/api/src/boards/generators/__init__.py +0 -57
- package/templates/api/src/boards/generators/artifact_resolution.py +0 -405
- package/templates/api/src/boards/generators/artifacts.py +0 -53
- package/templates/api/src/boards/generators/base.py +0 -144
- package/templates/api/src/boards/generators/implementations/__init__.py +0 -14
- package/templates/api/src/boards/generators/implementations/fal/__init__.py +0 -25
- package/templates/api/src/boards/generators/implementations/fal/audio/__init__.py +0 -23
- package/templates/api/src/boards/generators/implementations/fal/audio/beatoven_music_generation.py +0 -171
- package/templates/api/src/boards/generators/implementations/fal/audio/beatoven_sound_effect_generation.py +0 -167
- package/templates/api/src/boards/generators/implementations/fal/audio/chatterbox_text_to_speech.py +0 -176
- package/templates/api/src/boards/generators/implementations/fal/audio/chatterbox_tts_turbo.py +0 -195
- package/templates/api/src/boards/generators/implementations/fal/audio/elevenlabs_sound_effects_v2.py +0 -194
- package/templates/api/src/boards/generators/implementations/fal/audio/elevenlabs_tts_eleven_v3.py +0 -209
- package/templates/api/src/boards/generators/implementations/fal/audio/fal_elevenlabs_tts_turbo_v2_5.py +0 -206
- package/templates/api/src/boards/generators/implementations/fal/audio/fal_minimax_speech_26_hd.py +0 -237
- package/templates/api/src/boards/generators/implementations/fal/audio/minimax_music_v2.py +0 -173
- package/templates/api/src/boards/generators/implementations/fal/audio/minimax_speech_2_6_turbo.py +0 -221
- package/templates/api/src/boards/generators/implementations/fal/image/__init__.py +0 -63
- package/templates/api/src/boards/generators/implementations/fal/image/bytedance_seedream_v45_edit.py +0 -219
- package/templates/api/src/boards/generators/implementations/fal/image/clarity_upscaler.py +0 -220
- package/templates/api/src/boards/generators/implementations/fal/image/crystal_upscaler.py +0 -173
- package/templates/api/src/boards/generators/implementations/fal/image/fal_ideogram_character.py +0 -227
- package/templates/api/src/boards/generators/implementations/fal/image/flux_2.py +0 -203
- package/templates/api/src/boards/generators/implementations/fal/image/flux_2_edit.py +0 -230
- package/templates/api/src/boards/generators/implementations/fal/image/flux_2_pro.py +0 -204
- package/templates/api/src/boards/generators/implementations/fal/image/flux_2_pro_edit.py +0 -221
- package/templates/api/src/boards/generators/implementations/fal/image/flux_pro_kontext.py +0 -216
- package/templates/api/src/boards/generators/implementations/fal/image/flux_pro_ultra.py +0 -197
- package/templates/api/src/boards/generators/implementations/fal/image/gemini_25_flash_image.py +0 -177
- package/templates/api/src/boards/generators/implementations/fal/image/gemini_25_flash_image_edit.py +0 -208
- package/templates/api/src/boards/generators/implementations/fal/image/gpt_image_15_edit.py +0 -216
- package/templates/api/src/boards/generators/implementations/fal/image/gpt_image_1_5.py +0 -177
- package/templates/api/src/boards/generators/implementations/fal/image/gpt_image_1_edit_image.py +0 -182
- package/templates/api/src/boards/generators/implementations/fal/image/gpt_image_1_mini.py +0 -167
- package/templates/api/src/boards/generators/implementations/fal/image/ideogram_character_edit.py +0 -299
- package/templates/api/src/boards/generators/implementations/fal/image/ideogram_v2.py +0 -190
- package/templates/api/src/boards/generators/implementations/fal/image/imagen4_preview.py +0 -191
- package/templates/api/src/boards/generators/implementations/fal/image/imagen4_preview_fast.py +0 -179
- package/templates/api/src/boards/generators/implementations/fal/image/nano_banana.py +0 -183
- package/templates/api/src/boards/generators/implementations/fal/image/nano_banana_edit.py +0 -212
- package/templates/api/src/boards/generators/implementations/fal/image/nano_banana_pro.py +0 -179
- package/templates/api/src/boards/generators/implementations/fal/image/nano_banana_pro_edit.py +0 -226
- package/templates/api/src/boards/generators/implementations/fal/image/qwen_image.py +0 -249
- package/templates/api/src/boards/generators/implementations/fal/image/qwen_image_edit.py +0 -244
- package/templates/api/src/boards/generators/implementations/fal/image/reve_edit.py +0 -178
- package/templates/api/src/boards/generators/implementations/fal/image/reve_text_to_image.py +0 -155
- package/templates/api/src/boards/generators/implementations/fal/image/seedream_v45_text_to_image.py +0 -180
- package/templates/api/src/boards/generators/implementations/fal/utils.py +0 -61
- package/templates/api/src/boards/generators/implementations/fal/video/__init__.py +0 -77
- package/templates/api/src/boards/generators/implementations/fal/video/bytedance_seedance_v1_pro_text_to_video.py +0 -209
- package/templates/api/src/boards/generators/implementations/fal/video/creatify_lipsync.py +0 -161
- package/templates/api/src/boards/generators/implementations/fal/video/fal_bytedance_seedance_v1_pro_image_to_video.py +0 -222
- package/templates/api/src/boards/generators/implementations/fal/video/fal_minimax_hailuo_02_standard_text_to_video.py +0 -152
- package/templates/api/src/boards/generators/implementations/fal/video/fal_pixverse_lipsync.py +0 -197
- package/templates/api/src/boards/generators/implementations/fal/video/fal_sora_2_text_to_video.py +0 -173
- package/templates/api/src/boards/generators/implementations/fal/video/infinitalk.py +0 -221
- package/templates/api/src/boards/generators/implementations/fal/video/kling_video_ai_avatar_v2_pro.py +0 -168
- package/templates/api/src/boards/generators/implementations/fal/video/kling_video_ai_avatar_v2_standard.py +0 -159
- package/templates/api/src/boards/generators/implementations/fal/video/kling_video_v2_5_turbo_pro_image_to_video.py +0 -175
- package/templates/api/src/boards/generators/implementations/fal/video/kling_video_v2_5_turbo_pro_text_to_video.py +0 -168
- package/templates/api/src/boards/generators/implementations/fal/video/minimax_hailuo_2_3_pro_image_to_video.py +0 -153
- package/templates/api/src/boards/generators/implementations/fal/video/sora2_image_to_video.py +0 -172
- package/templates/api/src/boards/generators/implementations/fal/video/sora_2_image_to_video_pro.py +0 -175
- package/templates/api/src/boards/generators/implementations/fal/video/sora_2_text_to_video_pro.py +0 -163
- package/templates/api/src/boards/generators/implementations/fal/video/sync_lipsync_v2.py +0 -167
- package/templates/api/src/boards/generators/implementations/fal/video/sync_lipsync_v2_pro.py +0 -155
- package/templates/api/src/boards/generators/implementations/fal/video/veed_fabric_1_0.py +0 -180
- package/templates/api/src/boards/generators/implementations/fal/video/veed_lipsync.py +0 -174
- package/templates/api/src/boards/generators/implementations/fal/video/veo3.py +0 -194
- package/templates/api/src/boards/generators/implementations/fal/video/veo31.py +0 -190
- package/templates/api/src/boards/generators/implementations/fal/video/veo31_fast.py +0 -190
- package/templates/api/src/boards/generators/implementations/fal/video/veo31_fast_image_to_video.py +0 -191
- package/templates/api/src/boards/generators/implementations/fal/video/veo31_first_last_frame_to_video.py +0 -187
- package/templates/api/src/boards/generators/implementations/fal/video/veo31_image_to_video.py +0 -183
- package/templates/api/src/boards/generators/implementations/fal/video/veo31_reference_to_video.py +0 -172
- package/templates/api/src/boards/generators/implementations/fal/video/wan_25_preview_image_to_video.py +0 -212
- package/templates/api/src/boards/generators/implementations/fal/video/wan_25_preview_text_to_video.py +0 -208
- package/templates/api/src/boards/generators/implementations/fal/video/wan_pro_image_to_video.py +0 -158
- package/templates/api/src/boards/generators/implementations/kie/__init__.py +0 -11
- package/templates/api/src/boards/generators/implementations/kie/base.py +0 -316
- package/templates/api/src/boards/generators/implementations/kie/image/__init__.py +0 -3
- package/templates/api/src/boards/generators/implementations/kie/image/nano_banana_edit.py +0 -190
- package/templates/api/src/boards/generators/implementations/kie/utils.py +0 -98
- package/templates/api/src/boards/generators/implementations/kie/video/__init__.py +0 -8
- package/templates/api/src/boards/generators/implementations/kie/video/veo3.py +0 -161
- package/templates/api/src/boards/generators/implementations/openai/__init__.py +0 -1
- package/templates/api/src/boards/generators/implementations/openai/audio/__init__.py +0 -1
- package/templates/api/src/boards/generators/implementations/openai/audio/whisper.py +0 -69
- package/templates/api/src/boards/generators/implementations/openai/image/__init__.py +0 -1
- package/templates/api/src/boards/generators/implementations/openai/image/dalle3.py +0 -96
- package/templates/api/src/boards/generators/implementations/replicate/__init__.py +0 -1
- package/templates/api/src/boards/generators/implementations/replicate/image/__init__.py +0 -1
- package/templates/api/src/boards/generators/implementations/replicate/image/flux_pro.py +0 -88
- package/templates/api/src/boards/generators/implementations/replicate/video/__init__.py +0 -1
- package/templates/api/src/boards/generators/implementations/replicate/video/lipsync.py +0 -73
- package/templates/api/src/boards/generators/loader.py +0 -253
- package/templates/api/src/boards/generators/registry.py +0 -114
- package/templates/api/src/boards/generators/resolution.py +0 -632
- package/templates/api/src/boards/generators/testmods/class_gen.py +0 -34
- package/templates/api/src/boards/generators/testmods/import_side_effect.py +0 -35
- package/templates/api/src/boards/graphql/__init__.py +0 -7
- package/templates/api/src/boards/graphql/access_control.py +0 -136
- package/templates/api/src/boards/graphql/mutations/root.py +0 -148
- package/templates/api/src/boards/graphql/queries/root.py +0 -116
- package/templates/api/src/boards/graphql/resolvers/__init__.py +0 -8
- package/templates/api/src/boards/graphql/resolvers/auth.py +0 -12
- package/templates/api/src/boards/graphql/resolvers/board.py +0 -1053
- package/templates/api/src/boards/graphql/resolvers/generation.py +0 -666
- package/templates/api/src/boards/graphql/resolvers/generator.py +0 -50
- package/templates/api/src/boards/graphql/resolvers/lineage.py +0 -381
- package/templates/api/src/boards/graphql/resolvers/upload.py +0 -463
- package/templates/api/src/boards/graphql/resolvers/user.py +0 -25
- package/templates/api/src/boards/graphql/schema.py +0 -81
- package/templates/api/src/boards/graphql/types/board.py +0 -102
- package/templates/api/src/boards/graphql/types/generation.py +0 -166
- package/templates/api/src/boards/graphql/types/generator.py +0 -17
- package/templates/api/src/boards/graphql/types/user.py +0 -47
- package/templates/api/src/boards/jobs/repository.py +0 -153
- package/templates/api/src/boards/logging.py +0 -195
- package/templates/api/src/boards/middleware.py +0 -339
- package/templates/api/src/boards/progress/__init__.py +0 -4
- package/templates/api/src/boards/progress/models.py +0 -25
- package/templates/api/src/boards/progress/publisher.py +0 -64
- package/templates/api/src/boards/py.typed +0 -0
- package/templates/api/src/boards/redis_pool.py +0 -118
- package/templates/api/src/boards/storage/__init__.py +0 -52
- package/templates/api/src/boards/storage/base.py +0 -363
- package/templates/api/src/boards/storage/config.py +0 -187
- package/templates/api/src/boards/storage/factory.py +0 -288
- package/templates/api/src/boards/storage/implementations/__init__.py +0 -27
- package/templates/api/src/boards/storage/implementations/gcs.py +0 -340
- package/templates/api/src/boards/storage/implementations/local.py +0 -201
- package/templates/api/src/boards/storage/implementations/s3.py +0 -294
- package/templates/api/src/boards/storage/implementations/supabase.py +0 -218
- package/templates/api/src/boards/tenant_isolation.py +0 -446
- package/templates/api/src/boards/validation.py +0 -262
- package/templates/api/src/boards/workers/__init__.py +0 -1
- package/templates/api/src/boards/workers/actors.py +0 -274
- package/templates/api/src/boards/workers/cli.py +0 -125
- package/templates/api/src/boards/workers/context.py +0 -348
- package/templates/api/src/boards/workers/middleware.py +0 -58
- package/templates/api/src/py.typed +0 -0
- package/templates/compose.web.yaml +0 -35
- package/templates/compose.yaml +0 -116
- package/templates/docker/env.example +0 -23
- package/templates/web/.env.example +0 -28
- package/templates/web/Dockerfile +0 -51
- package/templates/web/components.json +0 -22
- package/templates/web/imageLoader.js +0 -18
- package/templates/web/next-env.d.ts +0 -5
- package/templates/web/next.config.js +0 -36
- package/templates/web/package.json +0 -41
- package/templates/web/postcss.config.mjs +0 -7
- package/templates/web/public/favicon.ico +0 -0
- package/templates/web/src/app/boards/[boardId]/page.tsx +0 -353
- package/templates/web/src/app/globals.css +0 -123
- package/templates/web/src/app/layout.tsx +0 -31
- package/templates/web/src/app/lineage/[generationId]/page.tsx +0 -235
- package/templates/web/src/app/page.tsx +0 -35
- package/templates/web/src/app/providers.tsx +0 -18
- package/templates/web/src/components/boards/ArtifactInputSlots.tsx +0 -206
- package/templates/web/src/components/boards/ArtifactPreview.tsx +0 -466
- package/templates/web/src/components/boards/GenerationGrid.tsx +0 -282
- package/templates/web/src/components/boards/GenerationInput.tsx +0 -370
- package/templates/web/src/components/boards/GeneratorSelector.tsx +0 -272
- package/templates/web/src/components/boards/UploadArtifact.tsx +0 -563
- package/templates/web/src/components/header.tsx +0 -32
- package/templates/web/src/components/theme-provider.tsx +0 -10
- package/templates/web/src/components/theme-toggle.tsx +0 -75
- package/templates/web/src/components/ui/alert-dialog.tsx +0 -157
- package/templates/web/src/components/ui/button.tsx +0 -58
- package/templates/web/src/components/ui/card.tsx +0 -92
- package/templates/web/src/components/ui/dropdown-menu.tsx +0 -200
- package/templates/web/src/components/ui/navigation-menu.tsx +0 -168
- package/templates/web/src/components/ui/toast.tsx +0 -128
- package/templates/web/src/components/ui/toaster.tsx +0 -35
- package/templates/web/src/components/ui/use-toast.ts +0 -187
- package/templates/web/src/hooks/useGeneratorMRU.ts +0 -57
- package/templates/web/src/lib/utils.ts +0 -6
- package/templates/web/tsconfig.json +0 -41
package/templates/api/src/boards/generators/implementations/fal/video/sora2_image_to_video.py
DELETED
@@ -1,172 +0,0 @@
-"""
-Sora 2 image-to-video generator.
-
-OpenAI's state-of-the-art video generation model that creates richly detailed,
-dynamic clips with audio from natural language prompts and images.
-
-Based on Fal AI's fal-ai/sora-2/image-to-video model.
-See: https://fal.ai/models/fal-ai/sora-2/image-to-video
-"""
-
-import os
-from typing import Literal
-
-from pydantic import BaseModel, Field
-
-from ....artifacts import ImageArtifact
-from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
-
-
-class Sora2ImageToVideoInput(BaseModel):
-    """Input schema for Sora 2 image-to-video generation.
-
-    Artifact fields (image_url) are automatically detected via type
-    introspection and resolved from generation IDs to ImageArtifact objects.
-    """
-
-    prompt: str = Field(
-        description="The text prompt describing the video you want to generate",
-        min_length=1,
-        max_length=5000,
-    )
-    image_url: ImageArtifact = Field(description="The image to use as the first frame")
-    resolution: Literal["auto", "720p"] = Field(
-        default="auto",
-        description="Resolution of the generated video",
-    )
-    aspect_ratio: Literal["auto", "9:16", "16:9"] = Field(
-        default="auto",
-        description="Aspect ratio of the generated video",
-    )
-    duration: Literal[4, 8, 12] = Field(
-        default=4,
-        description="Duration of the generated video in seconds",
-    )
-
-
-class FalSora2ImageToVideoGenerator(BaseGenerator):
-    """Generator for creating videos from images using OpenAI's Sora 2."""
-
-    name = "fal-sora2-image-to-video"
-    description = "Fal: Sora 2 - Generate videos from images with audio"
-    artifact_type = "video"
-
-    def get_input_schema(self) -> type[Sora2ImageToVideoInput]:
-        """Return the input schema for this generator."""
-        return Sora2ImageToVideoInput
-
-    async def generate(
-        self, inputs: Sora2ImageToVideoInput, context: GeneratorExecutionContext
-    ) -> GeneratorResult:
-        """Generate video using fal.ai sora-2/image-to-video."""
-        # Check for API key
-        if not os.getenv("FAL_KEY"):
-            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
-
-        # Import fal_client
-        try:
-            import fal_client
-        except ImportError as e:
-            raise ImportError(
-                "fal.ai SDK is required for FalSora2ImageToVideoGenerator. "
-                "Install with: pip install weirdfingers-boards[generators-fal]"
-            ) from e
-
-        # Upload image artifact to Fal's public storage
-        # Fal API requires publicly accessible URLs, but our storage_url might be:
-        # - Localhost URLs (not publicly accessible)
-        # - Private S3 buckets (not publicly accessible)
-        # So we upload to Fal's temporary storage first
-        from ..utils import upload_artifacts_to_fal
-
-        image_urls = await upload_artifacts_to_fal([inputs.image_url], context)
-
-        # Prepare arguments for fal.ai API
-        arguments = {
-            "prompt": inputs.prompt,
-            "image_url": image_urls[0],
-            "resolution": inputs.resolution,
-            "aspect_ratio": inputs.aspect_ratio,
-            "duration": inputs.duration,
-        }
-
-        # Submit async job
-        handler = await fal_client.submit_async(
-            "fal-ai/sora-2/image-to-video",
-            arguments=arguments,
-        )
-
-        # Store external job ID
-        await context.set_external_job_id(handler.request_id)
-
-        # Stream progress updates
-        from .....progress.models import ProgressUpdate
-
-        event_count = 0
-        async for event in handler.iter_events(with_logs=True):
-            event_count += 1
-            # Sample every 3rd event to avoid spam
-            if event_count % 3 == 0:
-                # Extract logs if available
-                logs = getattr(event, "logs", None)
-                if logs:
-                    # Join log entries into a single message
-                    if isinstance(logs, list):
-                        message = " | ".join(str(log) for log in logs if log)
-                    else:
-                        message = str(logs)
-
-                    if message:
-                        await context.publish_progress(
-                            ProgressUpdate(
-                                job_id=handler.request_id,
-                                status="processing",
-                                progress=50.0,
-                                phase="processing",
-                                message=message,
-                            )
-                        )
-
-        # Get final result
-        result = await handler.get()
-
-        # Extract video from result
-        # Expected structure: {"video": {"url": "...", "width": 1280, "height": 720, ...}}
-        video_data = result.get("video")
-        if not video_data:
-            raise ValueError("No video returned from fal.ai API")
-
-        video_url = video_data.get("url")
-        if not video_url:
-            raise ValueError("Video missing URL in fal.ai response")
-
-        # Extract metadata from response (if available)
-        width = video_data.get("width", 1280)
-        height = video_data.get("height", 720)
-        duration_seconds = video_data.get("duration", inputs.duration)
-        fps = video_data.get("fps", 30)
-
-        # Store video result
-        artifact = await context.store_video_result(
-            storage_url=video_url,
-            format="mp4",
-            width=width,
-            height=height,
-            duration=duration_seconds,
-            fps=fps,
-            output_index=0,
-        )
-
-        return GeneratorResult(outputs=[artifact])
-
-    async def estimate_cost(self, inputs: Sora2ImageToVideoInput) -> float:
-        """Estimate cost for this generation in USD.
-
-        Note: Pricing information not disclosed in Fal documentation.
-        Using placeholder value that should be updated with actual pricing.
-        """
-        # TODO: Update with actual pricing from Fal when available
-        # Estimate based on duration - longer videos likely cost more
-        base_cost = 0.20  # Placeholder estimate for 4s
-        duration_multiplier = inputs.duration / 4.0
-        return base_cost * duration_multiplier
package/templates/api/src/boards/generators/implementations/fal/video/sora_2_image_to_video_pro.py
DELETED
@@ -1,175 +0,0 @@
-"""
-OpenAI Sora 2 Image-to-Video Pro generator.
-
-Image-to-video endpoint for Sora 2 Pro, OpenAI's state-of-the-art video model
-capable of creating richly detailed, dynamic clips with audio from natural
-language prompts and images.
-
-Based on Fal AI's fal-ai/sora-2/image-to-video/pro model.
-See: https://fal.ai/models/fal-ai/sora-2/image-to-video/pro
-"""
-
-import os
-from typing import Literal
-
-from pydantic import BaseModel, Field
-
-from ....artifacts import ImageArtifact
-from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
-
-
-class Sora2ImageToVideoProInput(BaseModel):
-    """Input schema for Sora 2 Image-to-Video Pro generation.
-
-    Artifact fields (image_url) are automatically detected via type
-    introspection and resolved from generation IDs to ImageArtifact objects.
-    """
-
-    prompt: str = Field(
-        description="The text prompt describing the video you want to generate",
-        min_length=1,
-        max_length=5000,
-    )
-    image_url: ImageArtifact = Field(description="The image to use as the first frame of the video")
-    resolution: Literal["auto", "720p", "1080p"] = Field(
-        default="auto",
-        description="Resolution of the generated video. 'auto' selects optimal resolution",
-    )
-    aspect_ratio: Literal["auto", "9:16", "16:9"] = Field(
-        default="auto",
-        description="Aspect ratio of the generated video. 'auto' uses the image's aspect ratio",
-    )
-    duration: Literal[4, 8, 12] = Field(
-        default=4,
-        description="Duration of the generated video in seconds",
-    )
-
-
-class FalSora2ImageToVideoProGenerator(BaseGenerator):
-    """Generator for creating videos from images using OpenAI Sora 2 Pro."""
-
-    name = "fal-sora-2-image-to-video-pro"
-    description = "Fal: Sora 2 Pro - Create dynamic videos with audio from images and text prompts"
-    artifact_type = "video"
-
-    def get_input_schema(self) -> type[Sora2ImageToVideoProInput]:
-        """Return the input schema for this generator."""
-        return Sora2ImageToVideoProInput
-
-    async def generate(
-        self, inputs: Sora2ImageToVideoProInput, context: GeneratorExecutionContext
-    ) -> GeneratorResult:
-        """Generate video using fal.ai sora-2/image-to-video/pro."""
-        # Check for API key
-        if not os.getenv("FAL_KEY"):
-            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
-
-        # Import fal_client
-        try:
-            import fal_client
-        except ImportError as e:
-            raise ImportError(
-                "fal.ai SDK is required for FalSora2ImageToVideoProGenerator. "
-                "Install with: pip install weirdfingers-boards[generators-fal]"
-            ) from e
-
-        # Upload image artifact to Fal's public storage
-        # Fal API requires publicly accessible URLs, but our storage_url might be:
-        # - Localhost URLs (not publicly accessible)
-        # - Private S3 buckets (not publicly accessible)
-        # So we upload to Fal's temporary storage first
-        from ..utils import upload_artifacts_to_fal
-
-        image_urls = await upload_artifacts_to_fal([inputs.image_url], context)
-
-        # Prepare arguments for fal.ai API
-        arguments = {
-            "prompt": inputs.prompt,
-            "image_url": image_urls[0],
-            "resolution": inputs.resolution,
-            "aspect_ratio": inputs.aspect_ratio,
-            "duration": inputs.duration,
-        }
-
-        # Submit async job
-        handler = await fal_client.submit_async(
-            "fal-ai/sora-2/image-to-video/pro",
-            arguments=arguments,
-        )
-
-        # Store external job ID
-        await context.set_external_job_id(handler.request_id)
-
-        # Stream progress updates
-        from .....progress.models import ProgressUpdate
-
-        event_count = 0
-        async for event in handler.iter_events(with_logs=True):
-            event_count += 1
-            # Sample every 3rd event to avoid spam
-            if event_count % 3 == 0:
-                # Extract logs if available
-                logs = getattr(event, "logs", None)
-                if logs:
-                    # Join log entries into a single message
-                    if isinstance(logs, list):
-                        message = " | ".join(str(log) for log in logs if log)
-                    else:
-                        message = str(logs)
-
-                    if message:
-                        await context.publish_progress(
-                            ProgressUpdate(
-                                job_id=handler.request_id,
-                                status="processing",
-                                progress=50.0,
-                                phase="processing",
-                                message=message,
-                            )
-                        )
-
-        # Get final result
-        result = await handler.get()
-
-        # Extract video from result
-        # Expected structure: {"video": {"url": "...", "width": ..., "height": ..., ...}}
-        video_data = result.get("video")
-        if not video_data:
-            raise ValueError("No video returned from fal.ai API")
-
-        video_url = video_data.get("url")
-        if not video_url:
-            raise ValueError("Video missing URL in fal.ai response")
-
-        # Extract video metadata from response
-        width = video_data.get("width", 1280)
-        height = video_data.get("height", 720)
-        duration_seconds = video_data.get("duration", inputs.duration)
-        fps = video_data.get("fps", 30)
-
-        # Store video result
-        artifact = await context.store_video_result(
-            storage_url=video_url,
-            format="mp4",
-            width=width,
-            height=height,
-            duration=duration_seconds,
-            fps=fps,
-            output_index=0,
-        )
-
-        return GeneratorResult(outputs=[artifact])
-
-    async def estimate_cost(self, inputs: Sora2ImageToVideoProInput) -> float:
-        """Estimate cost for this generation in USD.
-
-        Note: Pricing information not available in Fal documentation.
-        Using placeholder value that should be updated with actual pricing.
-        """
-        # TODO: Update with actual pricing from Fal when available
-        # Cost likely increases with duration
-        base_cost = 0.50  # Placeholder estimate for 4s
-
-        # Adjust for duration
-        duration_multiplier = inputs.duration / 4  # 4s is base
-        return base_cost * duration_multiplier
package/templates/api/src/boards/generators/implementations/fal/video/sora_2_text_to_video_pro.py
DELETED
@@ -1,163 +0,0 @@
-"""
-Sora 2 Pro text-to-video generator.
-
-OpenAI's state-of-the-art video model capable of creating richly detailed,
-dynamic clips with audio from natural language descriptions.
-
-Based on Fal AI's fal-ai/sora-2/text-to-video/pro model.
-See: https://fal.ai/models/fal-ai/sora-2/text-to-video/pro
-"""
-
-import os
-from typing import Literal
-
-from pydantic import BaseModel, Field
-
-from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
-
-
-class Sora2TextToVideoProInput(BaseModel):
-    """Input schema for Sora 2 Pro text-to-video generation."""
-
-    prompt: str = Field(
-        description="Describes the desired video",
-        min_length=1,
-        max_length=5000,
-    )
-    resolution: Literal["720p", "1080p"] = Field(
-        default="1080p",
-        description="Video resolution",
-    )
-    aspect_ratio: Literal["9:16", "16:9"] = Field(
-        default="16:9",
-        description="Video aspect ratio",
-    )
-    duration: Literal[4, 8, 12] = Field(
-        default=4,
-        description="Video duration in seconds",
-    )
-
-
-class FalSora2TextToVideoProGenerator(BaseGenerator):
-    """Generator for text-to-video using Sora 2 Pro."""
-
-    name = "fal-sora-2-text-to-video-pro"
-    description = "Fal: Sora 2 Pro - OpenAI's state-of-the-art text-to-video model with audio"
-    artifact_type = "video"
-
-    def get_input_schema(self) -> type[Sora2TextToVideoProInput]:
-        """Return the input schema for this generator."""
-        return Sora2TextToVideoProInput
-
-    async def generate(
-        self, inputs: Sora2TextToVideoProInput, context: GeneratorExecutionContext
-    ) -> GeneratorResult:
-        """Generate video using fal.ai Sora 2 Pro model."""
-        # Check for API key
-        if not os.getenv("FAL_KEY"):
-            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
-
-        # Import fal_client
-        try:
-            import fal_client
-        except ImportError as e:
-            raise ImportError(
-                "fal.ai SDK is required for FalSora2TextToVideoProGenerator. "
-                "Install with: pip install weirdfingers-boards[generators-fal]"
-            ) from e
-
-        # Prepare arguments for fal.ai API
-        arguments = {
-            "prompt": inputs.prompt,
-            "resolution": inputs.resolution,
-            "aspect_ratio": inputs.aspect_ratio,
-            "duration": inputs.duration,
-        }
-
-        # Submit async job
-        handler = await fal_client.submit_async(
-            "fal-ai/sora-2/text-to-video/pro",
-            arguments=arguments,
-        )
-
-        # Store external job ID
-        await context.set_external_job_id(handler.request_id)
-
-        # Stream progress updates
-        from .....progress.models import ProgressUpdate
-
-        event_count = 0
-        async for event in handler.iter_events(with_logs=True):
-            event_count += 1
-            # Sample every 3rd event to avoid spam
-            if event_count % 3 == 0:
-                # Extract logs if available
-                logs = getattr(event, "logs", None)
-                if logs:
-                    # Join log entries into a single message
-                    if isinstance(logs, list):
-                        message = " | ".join(str(log) for log in logs if log)
-                    else:
-                        message = str(logs)
-
-                    if message:
-                        await context.publish_progress(
-                            ProgressUpdate(
-                                job_id=handler.request_id,
-                                status="processing",
-                                progress=50.0,  # Approximate mid-point progress
-                                phase="processing",
-                                message=message,
-                            )
-                        )
-
-        # Get final result
-        result = await handler.get()
-
-        # Extract video from result
-        # fal.ai returns: {"video": {"url": "...", "width": 1920, "height": 1080, ...}}
-        video_data = result.get("video")
-        if not video_data:
-            raise ValueError("No video returned from fal.ai API")
-
-        video_url = video_data.get("url")
-        if not video_url:
-            raise ValueError("Video missing URL in fal.ai response")
-
-        # Extract dimensions from response
-        width = video_data.get("width", 1920)
-        height = video_data.get("height", 1080)
-        fps = video_data.get("fps")
-        duration = video_data.get("duration", float(inputs.duration))
-
-        # Store video result
-        artifact = await context.store_video_result(
-            storage_url=video_url,
-            format="mp4",
-            width=width,
-            height=height,
-            duration=duration,
-            fps=fps,
-            output_index=0,
-        )
-
-        return GeneratorResult(outputs=[artifact])
-
-    async def estimate_cost(self, inputs: Sora2TextToVideoProInput) -> float:
-        """Estimate cost for Sora 2 Pro generation.
-
-        Pricing information not available in official documentation.
-        Estimated at $0.20-$0.80 per video based on duration and resolution.
-        Actual costs may vary.
-        """
-        # Base cost per second of video
-        # Higher resolution and longer duration increase cost
-        base_cost_per_second = 0.05
-
-        # Resolution multiplier
-        resolution_multiplier = 1.5 if inputs.resolution == "1080p" else 1.0
-
-        # Calculate total cost
-        total_cost = base_cost_per_second * inputs.duration * resolution_multiplier
-
-        return total_cost
package/templates/api/src/boards/generators/implementations/fal/video/sync_lipsync_v2.py
DELETED
@@ -1,167 +0,0 @@
-"""
-fal.ai sync-lipsync v2 video generator.
-
-Generates realistic lip-synchronization animations from audio and video inputs
-using fal.ai's sync-lipsync/v2 model. Supports advanced audio/video duration
-mismatch handling with multiple sync modes.
-
-Based on Fal AI's fal-ai/sync-lipsync/v2 model.
-See: https://fal.ai/models/fal-ai/sync-lipsync/v2
-"""
-
-import os
-from typing import Literal
-
-from pydantic import BaseModel, Field
-
-from ....artifacts import AudioArtifact, VideoArtifact
-from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
-
-
-class SyncLipsyncV2Input(BaseModel):
-    """Input schema for sync-lipsync v2.
-
-    Artifact fields are automatically detected via type introspection
-    and resolved from generation IDs to artifact objects.
-    """
-
-    video: VideoArtifact = Field(description="Input video for lip-sync animation")
-    audio: AudioArtifact = Field(description="Audio to synchronize with the video")
-    model: Literal["lipsync-2", "lipsync-2-pro"] = Field(
-        default="lipsync-2",
-        description="Model selection; pro version costs ~1.67x more",
-    )
-    sync_mode: Literal["cut_off", "loop", "bounce", "silence", "remap"] = Field(
-        default="cut_off",
-        description="Handling method when audio/video durations mismatch",
-    )
-
-
-class FalSyncLipsyncV2Generator(BaseGenerator):
-    """Generator for realistic lip-synchronization animations."""
-
-    name = "fal-sync-lipsync-v2"
-    description = "Fal: sync-lipsync v2 - Realistic lip-sync animation with audio"
-    artifact_type = "video"
-
-    def get_input_schema(self) -> type[SyncLipsyncV2Input]:
-        """Return the input schema for this generator."""
-        return SyncLipsyncV2Input
-
-    async def generate(
-        self, inputs: SyncLipsyncV2Input, context: GeneratorExecutionContext
-    ) -> GeneratorResult:
-        """Generate lip-synced video using fal.ai sync-lipsync/v2."""
-        # Check for API key
-        if not os.getenv("FAL_KEY"):
-            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
-
-        # Import fal_client
-        try:
-            import fal_client
-        except ImportError as e:
-            raise ImportError(
-                "fal.ai SDK is required for FalSyncLipsyncV2Generator. "
-                "Install with: pip install weirdfingers-boards[generators-fal]"
-            ) from e
-
-        # Upload video and audio artifacts to Fal's public storage
-        # Fal API requires publicly accessible URLs
-        from ..utils import upload_artifacts_to_fal
-
-        # Upload video and audio separately
-        video_urls = await upload_artifacts_to_fal([inputs.video], context)
-        audio_urls = await upload_artifacts_to_fal([inputs.audio], context)
-
-        # Prepare arguments for fal.ai API
-        arguments = {
-            "video_url": video_urls[0],
-            "audio_url": audio_urls[0],
-            "model": inputs.model,
-            "sync_mode": inputs.sync_mode,
-        }
-
-        # Submit async job
-        handler = await fal_client.submit_async(
-            "fal-ai/sync-lipsync/v2",
-            arguments=arguments,
-        )
-
-        # Store external job ID
-        await context.set_external_job_id(handler.request_id)
-
-        # Stream progress updates
-        from .....progress.models import ProgressUpdate
-
-        event_count = 0
-        async for event in handler.iter_events(with_logs=True):
-            event_count += 1
-            # Sample every 3rd event to avoid spam
-            if event_count % 3 == 0:
-                # Extract logs if available
-                logs = getattr(event, "logs", None)
-                if logs:
-                    # Join log entries into a single message
-                    if isinstance(logs, list):
-                        message = " | ".join(str(log) for log in logs if log)
-                    else:
-                        message = str(logs)
-
-                    if message:
-                        await context.publish_progress(
-                            ProgressUpdate(
-                                job_id=handler.request_id,
-                                status="processing",
-                                progress=50.0,  # Approximate mid-point progress
-                                phase="processing",
-                                message=message,
-                            )
-                        )
-
-        # Get final result
-        result = await handler.get()
-
-        # Extract video from result
-        # fal.ai returns: {"video": {"url": "...", "content_type": "video/mp4", ...}}
-        video_data = result.get("video")
-
-        if not video_data:
-            raise ValueError("No video returned from fal.ai API")
-
-        video_url = video_data.get("url")
-        if not video_url:
-            raise ValueError("Video missing URL in fal.ai response")
-
-        # Extract format from content_type (e.g., "video/mp4" -> "mp4")
-        content_type = video_data.get("content_type", "video/mp4")
-        video_format = content_type.split("/")[-1] if "/" in content_type else "mp4"
-
-        # Store the video result
-        # Note: The API doesn't return width/height/duration/fps, so we use defaults
-        # The actual dimensions will be the same as the input video
-        artifact = await context.store_video_result(
-            storage_url=video_url,
-            format=video_format,
-            width=inputs.video.width,
-            height=inputs.video.height,
-            duration=inputs.audio.duration,
-            fps=inputs.video.fps,
-            output_index=0,
-        )
-
-        return GeneratorResult(outputs=[artifact])
-
-    async def estimate_cost(self, inputs: SyncLipsyncV2Input) -> float:
-        """Estimate cost for sync-lipsync v2 generation in USD.
-
-        Pricing not specified in documentation, using estimate based on
-        typical video processing costs. Pro model costs ~1.67x more.
-        """
-        # Base cost estimate per generation
-        base_cost = 0.05
-
-        # Pro model multiplier
-        if inputs.model == "lipsync-2-pro":
-            return base_cost * 1.67
-
-        return base_cost