@weirdfingers/baseboards 0.2.1 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. package/README.md +14 -4
  2. package/dist/index.js +13 -4
  3. package/dist/index.js.map +1 -1
  4. package/package.json +1 -1
  5. package/templates/api/ARTIFACT_RESOLUTION_GUIDE.md +148 -0
  6. package/templates/api/Dockerfile +2 -2
  7. package/templates/api/README.md +138 -6
  8. package/templates/api/config/generators.yaml +41 -7
  9. package/templates/api/docs/TESTING_LIVE_APIS.md +417 -0
  10. package/templates/api/pyproject.toml +49 -9
  11. package/templates/api/src/boards/__init__.py +1 -1
  12. package/templates/api/src/boards/auth/adapters/__init__.py +9 -2
  13. package/templates/api/src/boards/auth/factory.py +16 -2
  14. package/templates/api/src/boards/generators/__init__.py +2 -2
  15. package/templates/api/src/boards/generators/artifact_resolution.py +372 -0
  16. package/templates/api/src/boards/generators/artifacts.py +4 -4
  17. package/templates/api/src/boards/generators/base.py +8 -4
  18. package/templates/api/src/boards/generators/implementations/__init__.py +4 -2
  19. package/templates/api/src/boards/generators/implementations/fal/__init__.py +25 -0
  20. package/templates/api/src/boards/generators/implementations/fal/audio/__init__.py +4 -0
  21. package/templates/api/src/boards/generators/implementations/fal/audio/minimax_music_v2.py +173 -0
  22. package/templates/api/src/boards/generators/implementations/fal/audio/minimax_speech_2_6_turbo.py +221 -0
  23. package/templates/api/src/boards/generators/implementations/fal/image/__init__.py +17 -0
  24. package/templates/api/src/boards/generators/implementations/fal/image/flux_pro_kontext.py +216 -0
  25. package/templates/api/src/boards/generators/implementations/fal/image/flux_pro_ultra.py +197 -0
  26. package/templates/api/src/boards/generators/implementations/fal/image/imagen4_preview.py +191 -0
  27. package/templates/api/src/boards/generators/implementations/fal/image/imagen4_preview_fast.py +179 -0
  28. package/templates/api/src/boards/generators/implementations/fal/image/nano_banana.py +183 -0
  29. package/templates/api/src/boards/generators/implementations/fal/image/nano_banana_edit.py +212 -0
  30. package/templates/api/src/boards/generators/implementations/fal/utils.py +61 -0
  31. package/templates/api/src/boards/generators/implementations/fal/video/__init__.py +13 -0
  32. package/templates/api/src/boards/generators/implementations/fal/video/kling_video_v2_5_turbo_pro_text_to_video.py +168 -0
  33. package/templates/api/src/boards/generators/implementations/fal/video/sync_lipsync_v2.py +167 -0
  34. package/templates/api/src/boards/generators/implementations/fal/video/veo31_first_last_frame_to_video.py +180 -0
  35. package/templates/api/src/boards/generators/implementations/openai/__init__.py +1 -0
  36. package/templates/api/src/boards/generators/implementations/openai/audio/__init__.py +1 -0
  37. package/templates/api/src/boards/generators/implementations/{audio → openai/audio}/whisper.py +9 -6
  38. package/templates/api/src/boards/generators/implementations/openai/image/__init__.py +1 -0
  39. package/templates/api/src/boards/generators/implementations/{image → openai/image}/dalle3.py +8 -5
  40. package/templates/api/src/boards/generators/implementations/replicate/__init__.py +1 -0
  41. package/templates/api/src/boards/generators/implementations/replicate/image/__init__.py +1 -0
  42. package/templates/api/src/boards/generators/implementations/{image → replicate/image}/flux_pro.py +8 -5
  43. package/templates/api/src/boards/generators/implementations/replicate/video/__init__.py +1 -0
  44. package/templates/api/src/boards/generators/implementations/{video → replicate/video}/lipsync.py +9 -6
  45. package/templates/api/src/boards/generators/resolution.py +80 -20
  46. package/templates/api/src/boards/jobs/repository.py +49 -0
  47. package/templates/api/src/boards/storage/factory.py +16 -6
  48. package/templates/api/src/boards/workers/actors.py +69 -5
  49. package/templates/api/src/boards/workers/context.py +177 -21
  50. package/templates/web/package.json +2 -1
  51. package/templates/web/src/components/boards/GenerationInput.tsx +154 -52
  52. package/templates/web/src/components/boards/GeneratorSelector.tsx +57 -59
  53. package/templates/web/src/components/ui/dropdown-menu.tsx +200 -0
  54. package/templates/api/src/boards/generators/implementations/audio/__init__.py +0 -3
  55. package/templates/api/src/boards/generators/implementations/image/__init__.py +0 -3
  56. package/templates/api/src/boards/generators/implementations/video/__init__.py +0 -3
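The dominant theme of this release is a reorganization of generator implementations from media-type packages (audio, image, video) into provider packages (fal, openai, replicate), with classes and generator names gaining provider prefixes. A sketch of what the migration looks like for downstream code, using names taken from the diffs below (the boards import root is inferred from the src/boards layout; treat the exact import paths as an assumption):

    # 0.2.1: media-type packages, bare generator names
    from boards.generators.implementations.image.dalle3 import DallE3Generator  # name = "dall-e-3"

    # 0.4.0: provider packages, provider-prefixed names
    from boards.generators.implementations.openai.image.dalle3 import OpenAIDallE3Generator  # "openai-dall-e-3"
    from boards.generators.implementations.fal.video.sync_lipsync_v2 import FalSyncLipsyncV2Generator  # "fal-sync-lipsync-v2"

Any config/generators.yaml entries that reference the old names ("whisper", "dall-e-3", "flux-pro", "lipsync") will need the prefixed names; the generators.yaml change above (+41 -7) is consistent with that.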
package/templates/api/src/boards/generators/implementations/fal/video/sync_lipsync_v2.py
@@ -0,0 +1,167 @@
+ """
+ fal.ai sync-lipsync v2 video generator.
+
+ Generates realistic lip-synchronization animations from audio and video inputs
+ using fal.ai's sync-lipsync/v2 model. Supports advanced audio/video duration
+ mismatch handling with multiple sync modes.
+
+ Based on Fal AI's fal-ai/sync-lipsync/v2 model.
+ See: https://fal.ai/models/fal-ai/sync-lipsync/v2
+ """
+
+ import os
+ from typing import Literal
+
+ from pydantic import BaseModel, Field
+
+ from ....artifacts import AudioArtifact, VideoArtifact
+ from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+
+
+ class SyncLipsyncV2Input(BaseModel):
+     """Input schema for sync-lipsync v2.
+
+     Artifact fields are automatically detected via type introspection
+     and resolved from generation IDs to artifact objects.
+     """
+
+     video: VideoArtifact = Field(description="Input video for lip-sync animation")
+     audio: AudioArtifact = Field(description="Audio to synchronize with the video")
+     model: Literal["lipsync-2", "lipsync-2-pro"] = Field(
+         default="lipsync-2",
+         description="Model selection; pro version costs ~1.67x more",
+     )
+     sync_mode: Literal["cut_off", "loop", "bounce", "silence", "remap"] = Field(
+         default="cut_off",
+         description="Handling method when audio/video durations mismatch",
+     )
+
+
+ class FalSyncLipsyncV2Generator(BaseGenerator):
+     """Generator for realistic lip-synchronization animations."""
+
+     name = "fal-sync-lipsync-v2"
+     description = "Fal: sync-lipsync v2 - Realistic lip-sync animation with audio"
+     artifact_type = "video"
+
+     def get_input_schema(self) -> type[SyncLipsyncV2Input]:
+         """Return the input schema for this generator."""
+         return SyncLipsyncV2Input
+
+     async def generate(
+         self, inputs: SyncLipsyncV2Input, context: GeneratorExecutionContext
+     ) -> GeneratorResult:
+         """Generate lip-synced video using fal.ai sync-lipsync/v2."""
+         # Check for API key
+         if not os.getenv("FAL_KEY"):
+             raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
+
+         # Import fal_client
+         try:
+             import fal_client
+         except ImportError as e:
+             raise ImportError(
+                 "fal.ai SDK is required for FalSyncLipsyncV2Generator. "
+                 "Install with: pip install weirdfingers-boards[generators-fal]"
+             ) from e
+
+         # Upload video and audio artifacts to Fal's public storage
+         # Fal API requires publicly accessible URLs
+         from ..utils import upload_artifacts_to_fal
+
+         # Upload video and audio separately
+         video_urls = await upload_artifacts_to_fal([inputs.video], context)
+         audio_urls = await upload_artifacts_to_fal([inputs.audio], context)
+
+         # Prepare arguments for fal.ai API
+         arguments = {
+             "video_url": video_urls[0],
+             "audio_url": audio_urls[0],
+             "model": inputs.model,
+             "sync_mode": inputs.sync_mode,
+         }
+
+         # Submit async job
+         handler = await fal_client.submit_async(
+             "fal-ai/sync-lipsync/v2",
+             arguments=arguments,
+         )
+
+         # Store external job ID
+         await context.set_external_job_id(handler.request_id)
+
+         # Stream progress updates
+         from .....progress.models import ProgressUpdate
+
+         event_count = 0
+         async for event in handler.iter_events(with_logs=True):
+             event_count += 1
+             # Sample every 3rd event to avoid spam
+             if event_count % 3 == 0:
+                 # Extract logs if available
+                 logs = getattr(event, "logs", None)
+                 if logs:
+                     # Join log entries into a single message
+                     if isinstance(logs, list):
+                         message = " | ".join(str(log) for log in logs if log)
+                     else:
+                         message = str(logs)
+
+                     if message:
+                         await context.publish_progress(
+                             ProgressUpdate(
+                                 job_id=handler.request_id,
+                                 status="processing",
+                                 progress=50.0,  # Approximate mid-point progress
+                                 phase="processing",
+                                 message=message,
+                             )
+                         )
+
+         # Get final result
+         result = await handler.get()
+
+         # Extract video from result
+         # fal.ai returns: {"video": {"url": "...", "content_type": "video/mp4", ...}}
+         video_data = result.get("video")
+
+         if not video_data:
+             raise ValueError("No video returned from fal.ai API")
+
+         video_url = video_data.get("url")
+         if not video_url:
+             raise ValueError("Video missing URL in fal.ai response")
+
+         # Extract format from content_type (e.g., "video/mp4" -> "mp4")
+         content_type = video_data.get("content_type", "video/mp4")
+         video_format = content_type.split("/")[-1] if "/" in content_type else "mp4"
+
+         # Store the video result
+         # Note: The API doesn't return width/height/duration/fps, so we use defaults
+         # The actual dimensions will be the same as the input video
+         artifact = await context.store_video_result(
+             storage_url=video_url,
+             format=video_format,
+             width=inputs.video.width,
+             height=inputs.video.height,
+             duration=inputs.audio.duration,
+             fps=inputs.video.fps,
+             output_index=0,
+         )
+
+         return GeneratorResult(outputs=[artifact])
+
+     async def estimate_cost(self, inputs: SyncLipsyncV2Input) -> float:
+         """Estimate cost for sync-lipsync v2 generation in USD.
+
+         Pricing not specified in documentation, using estimate based on
+         typical video processing costs. Pro model costs ~1.67x more.
+         """
+         # Base cost estimate per generation
+         base_cost = 0.05
+
+         # Pro model multiplier
+         if inputs.model == "lipsync-2-pro":
+             return base_cost * 1.67
+
+         return base_cost
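The progress loop above flattens fal event logs into a single message and publishes roughly every third event. A self-contained sketch of that sampling-and-flattening pattern, runnable without the fal SDK (FakeEvent is a hypothetical stand-in for fal_client's event objects):

    import asyncio
    from dataclasses import dataclass

    @dataclass
    class FakeEvent:
        logs: list[str] | None  # stand-in for fal_client event payloads (assumption)

    def flatten_logs(logs) -> str:
        # Same logic as the generator: join list entries, stringify anything else
        if isinstance(logs, list):
            return " | ".join(str(log) for log in logs if log)
        return str(logs)

    async def sample_messages(events, every: int = 3):
        """Yield a flattened message for every Nth event that carries logs."""
        count = 0
        async for event in events:
            count += 1
            if count % every == 0:
                logs = getattr(event, "logs", None)
                if logs and (message := flatten_logs(logs)):
                    yield message

    async def main():
        async def stream():
            for i in range(9):
                yield FakeEvent(logs=[f"step {i}"])

        async for msg in sample_messages(stream()):
            print(msg)  # prints "step 2", then "step 5", then "step 8"

    asyncio.run(main())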
package/templates/api/src/boards/generators/implementations/fal/video/veo31_first_last_frame_to_video.py
@@ -0,0 +1,180 @@
+ """
+ Google Veo 3.1 first-last frame to video generator.
+
+ Generates videos by interpolating between first and last frame images using
+ Google's Veo 3.1 technology via fal.ai.
+
+ Based on Fal AI's fal-ai/veo3.1/first-last-frame-to-video model.
+ See: https://fal.ai/models/fal-ai/veo3.1/first-last-frame-to-video
+ """
+
+ import os
+ from typing import Literal
+
+ from pydantic import BaseModel, Field
+
+ from ....artifacts import ImageArtifact
+ from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+
+
+ class Veo31FirstLastFrameToVideoInput(BaseModel):
+     """Input schema for Veo 3.1 first-last frame to video generation.
+
+     Artifact fields (first_frame, last_frame) are automatically detected via type
+     introspection and resolved from generation IDs to ImageArtifact objects.
+     """
+
+     first_frame: ImageArtifact = Field(description="The first frame of the video (input image)")
+     last_frame: ImageArtifact = Field(description="The last frame of the video (input image)")
+     prompt: str = Field(description="Text prompt describing the desired video content and motion")
+     duration: Literal["8s"] = Field(
+         default="8s",
+         description="Duration of the generated video in seconds (currently only 8s is supported)",
+     )
+     aspect_ratio: Literal["auto", "9:16", "16:9", "1:1"] = Field(
+         default="auto",
+         description=(
+             "Aspect ratio of the generated video. " "'auto' uses the aspect ratio from input images"
+         ),
+     )
+     resolution: Literal["720p", "1080p"] = Field(
+         default="720p",
+         description="Resolution of the generated video",
+     )
+     generate_audio: bool = Field(
+         default=True,
+         description="Whether to generate audio for the video. Disabling uses 50% fewer credits",
+     )
+
+
+ class FalVeo31FirstLastFrameToVideoGenerator(BaseGenerator):
+     """Generator for creating videos from first and last frame images using Google Veo 3.1."""
+
+     name = "fal-veo31-first-last-frame-to-video"
+     description = "Fal: Veo 3.1 - Generate videos by interpolating between first and last frames"
+     artifact_type = "video"
+
+     def get_input_schema(self) -> type[Veo31FirstLastFrameToVideoInput]:
+         """Return the input schema for this generator."""
+         return Veo31FirstLastFrameToVideoInput
+
+     async def generate(
+         self, inputs: Veo31FirstLastFrameToVideoInput, context: GeneratorExecutionContext
+     ) -> GeneratorResult:
+         """Generate video using fal.ai veo3.1/first-last-frame-to-video."""
+         # Check for API key
+         if not os.getenv("FAL_KEY"):
+             raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
+
+         # Import fal_client
+         try:
+             import fal_client
+         except ImportError as e:
+             raise ImportError(
+                 "fal.ai SDK is required for FalVeo31FirstLastFrameToVideoGenerator. "
+                 "Install with: pip install weirdfingers-boards[generators-fal]"
+             ) from e
+
+         # Upload image artifacts to Fal's public storage
+         # Fal API requires publicly accessible URLs, but our storage_url might be:
+         # - Localhost URLs (not publicly accessible)
+         # - Private S3 buckets (not publicly accessible)
+         # So we upload to Fal's temporary storage first
+         from ..utils import upload_artifacts_to_fal
+
+         first_frame_urls = await upload_artifacts_to_fal([inputs.first_frame], context)
+         last_frame_urls = await upload_artifacts_to_fal([inputs.last_frame], context)
+
+         # Prepare arguments for fal.ai API
+         arguments = {
+             "first_frame_url": first_frame_urls[0],
+             "last_frame_url": last_frame_urls[0],
+             "prompt": inputs.prompt,
+             "duration": inputs.duration,
+             "aspect_ratio": inputs.aspect_ratio,
+             "resolution": inputs.resolution,
+             "generate_audio": inputs.generate_audio,
+         }
+
+         # Submit async job
+         handler = await fal_client.submit_async(
+             "fal-ai/veo3.1/first-last-frame-to-video",
+             arguments=arguments,
+         )
+
+         # Store external job ID
+         await context.set_external_job_id(handler.request_id)
+
+         # Stream progress updates
+         from .....progress.models import ProgressUpdate
+
+         event_count = 0
+         async for event in handler.iter_events(with_logs=True):
+             event_count += 1
+             # Sample every 3rd event to avoid spam
+             if event_count % 3 == 0:
+                 # Extract logs if available
+                 logs = getattr(event, "logs", None)
+                 if logs:
+                     # Join log entries into a single message
+                     if isinstance(logs, list):
+                         message = " | ".join(str(log) for log in logs if log)
+                     else:
+                         message = str(logs)
+
+                     if message:
+                         await context.publish_progress(
+                             ProgressUpdate(
+                                 job_id=handler.request_id,
+                                 status="processing",
+                                 progress=50.0,
+                                 phase="processing",
+                                 message=message,
+                             )
+                         )
+
+         # Get final result
+         result = await handler.get()
+
+         # Extract video from result
+         # Expected structure: {"video": {"url": "...", "content_type": "...", ...}}
+         video_data = result.get("video")
+         if not video_data:
+             raise ValueError("No video returned from fal.ai API")
+
+         video_url = video_data.get("url")
+         if not video_url:
+             raise ValueError("Video missing URL in fal.ai response")
+
+         # Store video result
+         # Note: Fal API doesn't provide video dimensions/duration in the response,
+         # so we'll use defaults based on input parameters
+         width = 1280 if inputs.resolution == "720p" else 1920
+         height = 720 if inputs.resolution == "720p" else 1080
+
+         # Parse duration from "8s" format
+         duration_seconds = int(inputs.duration.rstrip("s"))
+
+         artifact = await context.store_video_result(
+             storage_url=video_url,
+             format="mp4",
+             width=width,
+             height=height,
+             duration=duration_seconds,
+             output_index=0,
+         )
+
+         return GeneratorResult(outputs=[artifact])
+
+     async def estimate_cost(self, inputs: Veo31FirstLastFrameToVideoInput) -> float:
+         """Estimate cost for this generation in USD.
+
+         Note: Pricing information not available in Fal documentation.
+         Using placeholder value that should be updated with actual pricing.
+         """
+         # TODO: Update with actual pricing from Fal when available
+         # Base cost, with 50% reduction if audio is disabled
+         base_cost = 0.15  # Placeholder estimate
+         if not inputs.generate_audio:
+             return base_cost * 0.5
+         return base_cost
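Because the Veo response carries no dimensions or duration, the stored metadata above is derived entirely from the inputs. The mapping is small enough to restate and check standalone (this mirrors the code above; it is not an import from the package). One caveat visible in the diff: the width/height defaults assume 16:9 output, so with aspect_ratio set to "9:16" or "1:1" the stored dimensions would not match the actual file.

    def derive_video_metadata(resolution: str, duration: str) -> tuple[int, int, int]:
        """Mirror the generator's defaults: 16:9 dimensions plus parsed duration."""
        width = 1280 if resolution == "720p" else 1920
        height = 720 if resolution == "720p" else 1080
        seconds = int(duration.rstrip("s"))  # "8s" -> 8
        return width, height, seconds

    assert derive_video_metadata("720p", "8s") == (1280, 720, 8)
    assert derive_video_metadata("1080p", "8s") == (1920, 1080, 8)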
package/templates/api/src/boards/generators/implementations/openai/__init__.py
@@ -0,0 +1 @@
+ """OpenAI provider generators."""
package/templates/api/src/boards/generators/implementations/openai/audio/__init__.py
@@ -0,0 +1 @@
+ """OpenAI audio generators."""
package/templates/api/src/boards/generators/implementations/{audio → openai/audio}/whisper.py
@@ -6,8 +6,8 @@ Demonstrates audio processing generator that outputs text.

  from pydantic import BaseModel, Field

- from ...artifacts import AudioArtifact
- from ...base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+ from ....artifacts import AudioArtifact
+ from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult


  class WhisperInput(BaseModel):
@@ -18,12 +18,12 @@ class WhisperInput(BaseModel):
      prompt: str = Field(default="", description="Optional prompt to guide transcription")


- class WhisperGenerator(BaseGenerator):
+ class OpenAIWhisperGenerator(BaseGenerator):
      """Whisper speech-to-text transcription using OpenAI API."""

-     name = "whisper"
+     name = "openai-whisper"
      artifact_type = "text"
-     description = "OpenAI Whisper - speech-to-text transcription"
+     description = "OpenAI: Whisper - speech-to-text transcription"

      def get_input_schema(self) -> type[WhisperInput]:
          return WhisperInput
@@ -35,7 +35,10 @@ class WhisperGenerator(BaseGenerator):
          try:
              from openai import AsyncOpenAI
          except ImportError as e:
-             raise ValueError("Required dependencies not available") from e
+             raise ImportError(
+                 "OpenAI SDK is required for WhisperGenerator. "
+                 "Install with: pip install weirdfingers-boards[generators-openai]"
+             ) from e

          client = AsyncOpenAI()
package/templates/api/src/boards/generators/implementations/openai/image/__init__.py
@@ -0,0 +1 @@
+ """OpenAI image generators."""
package/templates/api/src/boards/generators/implementations/{image → openai/image}/dalle3.py
@@ -8,7 +8,7 @@ import os

  from pydantic import BaseModel, Field

- from ...base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+ from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult


  class DallE3Input(BaseModel):
@@ -24,12 +24,12 @@ class DallE3Input(BaseModel):
      style: str = Field(default="vivid", description="Image style", pattern="^(vivid|natural)$")


- class DallE3Generator(BaseGenerator):
+ class OpenAIDallE3Generator(BaseGenerator):
      """DALL-E 3 image generator using OpenAI API."""

-     name = "dall-e-3"
+     name = "openai-dall-e-3"
      artifact_type = "image"
-     description = "OpenAI's DALL-E 3 - advanced text-to-image generation"
+     description = "OpenAI: DALL-E 3 - advanced text-to-image generation"

      def get_input_schema(self) -> type[DallE3Input]:
          return DallE3Input
@@ -46,7 +46,10 @@ class DallE3Generator(BaseGenerator):
          try:
              from openai import AsyncOpenAI
          except ImportError as e:
-             raise ValueError("Required dependencies not available") from e
+             raise ImportError(
+                 "OpenAI SDK is required for DallE3Generator. "
+                 "Install with: pip install weirdfingers-boards[generators-openai]"
+             ) from e

          client = AsyncOpenAI()
package/templates/api/src/boards/generators/implementations/replicate/__init__.py
@@ -0,0 +1 @@
+ """Replicate provider generators."""
package/templates/api/src/boards/generators/implementations/replicate/image/__init__.py
@@ -0,0 +1 @@
+ """Replicate image generators."""
package/templates/api/src/boards/generators/implementations/{image → replicate/image}/flux_pro.py
@@ -12,7 +12,7 @@ from collections.abc import AsyncIterator

  from pydantic import BaseModel, Field

- from ...base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+ from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult


  class FluxProInput(BaseModel):
@@ -27,12 +27,12 @@ class FluxProInput(BaseModel):
      safety_tolerance: int = Field(default=2, ge=1, le=5, description="Safety tolerance level (1-5)")


- class FluxProGenerator(BaseGenerator):
+ class ReplicateFluxProGenerator(BaseGenerator):
      """FLUX.1.1 Pro image generator using Replicate."""

-     name = "flux-pro"
+     name = "replicate-flux-pro"
      artifact_type = "image"
-     description = "FLUX.1.1 [pro] by Black Forest Labs - high-quality image generation"
+     description = "Replicate: FLUX.1.1 [pro] by Black Forest Labs - high-quality image generation"

      def get_input_schema(self) -> type[FluxProInput]:
          return FluxProInput
@@ -50,7 +50,10 @@ class FluxProGenerator(BaseGenerator):
              import replicate
              from replicate.helpers import FileOutput
          except ImportError as e:
-             raise ValueError("Required dependencies not available") from e
+             raise ImportError(
+                 "Replicate SDK is required for FluxProGenerator. "
+                 "Install with: pip install weirdfingers-boards[generators-replicate]"
+             ) from e

          # Use Replicate SDK directly
          prediction: FileOutput | AsyncIterator[FileOutput] = await replicate.async_run(
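This hunk, like the Whisper, DALL-E 3, and lipsync hunks, swaps a generic ValueError for an ImportError that names the missing SDK and the extra that provides it. The pattern is repeated verbatim four times; a hypothetical helper that would factor it out (not part of the package, shown only to make the pattern explicit):

    import importlib
    from types import ModuleType

    def require_sdk(module: str, extra: str, needed_by: str) -> ModuleType:
        """Import an optional provider SDK or fail with an actionable message."""
        try:
            return importlib.import_module(module)
        except ImportError as e:
            raise ImportError(
                f"{module} is required for {needed_by}. "
                f"Install with: pip install weirdfingers-boards[{extra}]"
            ) from e

    # e.g. inside generate():
    # replicate = require_sdk("replicate", "generators-replicate", "ReplicateFluxProGenerator")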
package/templates/api/src/boards/generators/implementations/replicate/video/__init__.py
@@ -0,0 +1 @@
+ """Replicate video generators."""
package/templates/api/src/boards/generators/implementations/{video → replicate/video}/lipsync.py
@@ -7,8 +7,8 @@ with automatic artifact resolution.

  from pydantic import BaseModel, Field

- from ...artifacts import AudioArtifact, VideoArtifact
- from ...base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+ from ....artifacts import AudioArtifact, VideoArtifact
+ from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult


  class LipsyncInput(BaseModel):
@@ -19,12 +19,12 @@ class LipsyncInput(BaseModel):
      prompt: str | None = Field(None, description="Optional prompt for generation")


- class LipsyncGenerator(BaseGenerator):
+ class ReplicateLipsyncGenerator(BaseGenerator):
      """Lipsync generator that syncs lips in video to audio."""

-     name = "lipsync"
+     name = "replicate-lipsync"
      artifact_type = "video"
-     description = "Sync lips in video to match audio track"
+     description = "Replicate: Sync lips in video to match audio track"

      def get_input_schema(self) -> type[LipsyncInput]:
          return LipsyncInput
@@ -37,7 +37,10 @@ class LipsyncGenerator(BaseGenerator):
          try:
              import replicate  # type: ignore
          except ImportError as e:
-             raise ValueError("Required dependencies not available") from e
+             raise ImportError(
+                 "Replicate SDK is required for LipsyncGenerator. "
+                 "Install with: pip install weirdfingers-boards[generators-replicate]"
+             ) from e

          # Resolve artifacts via context
          audio_file = await context.resolve_artifact(inputs.audio_source)
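Both lipsync generators rely on the automatic artifact resolution this release documents in ARTIFACT_RESOLUTION_GUIDE.md and expands in resolution.py: input fields typed as artifacts (AudioArtifact, VideoArtifact, ImageArtifact) are detected by introspection and resolved from generation IDs before generate() runs. A hypothetical request sketch; audio_source comes from the code above, while video_source and the payload shape are assumptions:

    request = {
        "generator_name": "replicate-lipsync",
        "inputs": {
            "video_source": "gen_abc123",  # generation ID, resolved to a VideoArtifact (field name assumed)
            "audio_source": "gen_def456",  # generation ID, resolved to an AudioArtifact
            "prompt": None,
        },
    }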