@weirdfingers/baseboards 0.6.1 → 0.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. package/dist/index.js +54 -28
  2. package/dist/index.js.map +1 -1
  3. package/package.json +1 -1
  4. package/templates/README.md +2 -0
  5. package/templates/api/.env.example +3 -0
  6. package/templates/api/config/generators.yaml +58 -0
  7. package/templates/api/pyproject.toml +1 -1
  8. package/templates/api/src/boards/__init__.py +1 -1
  9. package/templates/api/src/boards/api/endpoints/storage.py +85 -4
  10. package/templates/api/src/boards/api/endpoints/uploads.py +1 -2
  11. package/templates/api/src/boards/database/connection.py +98 -58
  12. package/templates/api/src/boards/generators/implementations/fal/audio/__init__.py +4 -0
  13. package/templates/api/src/boards/generators/implementations/fal/audio/chatterbox_text_to_speech.py +176 -0
  14. package/templates/api/src/boards/generators/implementations/fal/audio/chatterbox_tts_turbo.py +195 -0
  15. package/templates/api/src/boards/generators/implementations/fal/image/__init__.py +14 -0
  16. package/templates/api/src/boards/generators/implementations/fal/image/bytedance_seedream_v45_edit.py +219 -0
  17. package/templates/api/src/boards/generators/implementations/fal/image/gemini_25_flash_image_edit.py +208 -0
  18. package/templates/api/src/boards/generators/implementations/fal/image/gpt_image_15_edit.py +216 -0
  19. package/templates/api/src/boards/generators/implementations/fal/image/gpt_image_1_5.py +177 -0
  20. package/templates/api/src/boards/generators/implementations/fal/image/reve_edit.py +178 -0
  21. package/templates/api/src/boards/generators/implementations/fal/image/reve_text_to_image.py +155 -0
  22. package/templates/api/src/boards/generators/implementations/fal/image/seedream_v45_text_to_image.py +180 -0
  23. package/templates/api/src/boards/generators/implementations/fal/video/__init__.py +18 -0
  24. package/templates/api/src/boards/generators/implementations/fal/video/kling_video_ai_avatar_v2_pro.py +168 -0
  25. package/templates/api/src/boards/generators/implementations/fal/video/kling_video_ai_avatar_v2_standard.py +159 -0
  26. package/templates/api/src/boards/generators/implementations/fal/video/veed_fabric_1_0.py +180 -0
  27. package/templates/api/src/boards/generators/implementations/fal/video/veo31.py +190 -0
  28. package/templates/api/src/boards/generators/implementations/fal/video/veo31_fast.py +190 -0
  29. package/templates/api/src/boards/generators/implementations/fal/video/veo31_fast_image_to_video.py +191 -0
  30. package/templates/api/src/boards/generators/implementations/fal/video/veo31_first_last_frame_to_video.py +13 -6
  31. package/templates/api/src/boards/generators/implementations/fal/video/wan_25_preview_image_to_video.py +212 -0
  32. package/templates/api/src/boards/generators/implementations/fal/video/wan_25_preview_text_to_video.py +208 -0
  33. package/templates/api/src/boards/generators/implementations/kie/__init__.py +11 -0
  34. package/templates/api/src/boards/generators/implementations/kie/base.py +316 -0
  35. package/templates/api/src/boards/generators/implementations/kie/image/__init__.py +3 -0
  36. package/templates/api/src/boards/generators/implementations/kie/image/nano_banana_edit.py +190 -0
  37. package/templates/api/src/boards/generators/implementations/kie/utils.py +98 -0
  38. package/templates/api/src/boards/generators/implementations/kie/video/__init__.py +8 -0
  39. package/templates/api/src/boards/generators/implementations/kie/video/veo3.py +161 -0
  40. package/templates/api/src/boards/graphql/resolvers/upload.py +1 -1
  41. package/templates/web/package.json +4 -1
  42. package/templates/web/src/app/boards/[boardId]/page.tsx +156 -24
  43. package/templates/web/src/app/globals.css +3 -0
  44. package/templates/web/src/app/layout.tsx +15 -5
  45. package/templates/web/src/components/boards/ArtifactInputSlots.tsx +9 -9
  46. package/templates/web/src/components/boards/ArtifactPreview.tsx +34 -18
  47. package/templates/web/src/components/boards/GenerationGrid.tsx +101 -7
  48. package/templates/web/src/components/boards/GenerationInput.tsx +21 -21
  49. package/templates/web/src/components/boards/GeneratorSelector.tsx +232 -30
  50. package/templates/web/src/components/boards/UploadArtifact.tsx +385 -75
  51. package/templates/web/src/components/header.tsx +3 -1
  52. package/templates/web/src/components/theme-provider.tsx +10 -0
  53. package/templates/web/src/components/theme-toggle.tsx +75 -0
  54. package/templates/web/src/components/ui/alert-dialog.tsx +157 -0
  55. package/templates/web/src/components/ui/toast.tsx +128 -0
  56. package/templates/web/src/components/ui/toaster.tsx +35 -0
  57. package/templates/web/src/components/ui/use-toast.ts +186 -0
@@ -0,0 +1,159 @@
1
+ """
2
+ fal.ai Kling Video AI Avatar v2 Standard generator.
3
+
4
+ Generates avatar videos by synthesizing realistic humans, animals, cartoons, or
5
+ stylized characters. Takes an image and audio as inputs to create synchronized
6
+ video output.
7
+
8
+ Based on Fal AI's fal-ai/kling-video/ai-avatar/v2/standard model.
9
+ See: https://fal.ai/models/fal-ai/kling-video/ai-avatar/v2/standard
10
+ """
11
+
12
+ import os
13
+
14
+ from pydantic import BaseModel, Field
15
+
16
+ from ....artifacts import AudioArtifact, ImageArtifact
17
+ from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
18
+
19
+
20
class KlingVideoAiAvatarV2StandardInput(BaseModel):
    """Validated request model for Kling Video AI Avatar v2 Standard.

    Fields typed as artifacts are discovered through type introspection by the
    framework and are resolved from generation IDs into concrete artifact
    objects before ``generate`` is called.
    """

    # Source face/character image for the avatar.
    image: ImageArtifact = Field(description="The image to use as your avatar")
    # Driving audio track; lip movements are synchronized to it.
    audio: AudioArtifact = Field(description="The audio file for lip-sync animation")
    # "." acts as a sentinel meaning "no prompt"; see the generator, which
    # drops it before calling the API.
    prompt: str | None = Field(
        default=".",
        description="The prompt to use for the video generation",
    )
32
+
33
+
34
class FalKlingVideoAiAvatarV2StandardGenerator(BaseGenerator):
    """Generator for AI-powered avatar video synthesis."""

    name = "fal-kling-video-ai-avatar-v2-standard"
    description = "Fal: Kling Video AI Avatar v2 Standard - Avatar video from image and audio"
    artifact_type = "video"

    def get_input_schema(self) -> type[KlingVideoAiAvatarV2StandardInput]:
        """Expose the pydantic model used to validate generation requests."""
        return KlingVideoAiAvatarV2StandardInput

    async def generate(
        self, inputs: KlingVideoAiAvatarV2StandardInput, context: GeneratorExecutionContext
    ) -> GeneratorResult:
        """Generate avatar video using fal.ai kling-video/ai-avatar/v2/standard."""
        # Fail fast on missing credentials rather than deep inside the SDK call.
        if not os.getenv("FAL_KEY"):
            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")

        # fal_client is an optional extra; give a clear install hint if absent.
        try:
            import fal_client
        except ImportError as e:
            raise ImportError(
                "fal.ai SDK is required for FalKlingVideoAiAvatarV2StandardGenerator. "
                "Install with: pip install weirdfingers-boards[generators-fal]"
            ) from e

        # The Fal API only accepts publicly reachable URLs, so both artifacts
        # are first pushed to Fal's own storage.
        from ..utils import upload_artifacts_to_fal

        avatar_urls = await upload_artifacts_to_fal([inputs.image], context)
        voice_urls = await upload_artifacts_to_fal([inputs.audio], context)

        payload = {
            "image_url": avatar_urls[0],
            "audio_url": voice_urls[0],
        }

        # "." is the schema's sentinel default meaning "no prompt" — omit it.
        if inputs.prompt and inputs.prompt != ".":
            payload["prompt"] = inputs.prompt

        handler = await fal_client.submit_async(
            "fal-ai/kling-video/ai-avatar/v2/standard",
            arguments=payload,
        )

        # Persist the provider-side request id for external job tracking.
        await context.set_external_job_id(handler.request_id)

        from .....progress.models import ProgressUpdate

        seen = 0
        async for event in handler.iter_events(with_logs=True):
            seen += 1
            # Forward only every third event to avoid flooding progress consumers.
            if seen % 3 != 0:
                continue
            logs = getattr(event, "logs", None)
            if not logs:
                continue
            if isinstance(logs, list):
                message = " | ".join(str(entry) for entry in logs if entry)
            else:
                message = str(logs)
            if message:
                await context.publish_progress(
                    ProgressUpdate(
                        job_id=handler.request_id,
                        status="processing",
                        progress=50.0,  # provider reports no percentage; use a midpoint
                        phase="processing",
                        message=message,
                    )
                )

        result = await handler.get()

        # Expected shape: {"video": {"url": ..., "content_type": ...}, "duration": ...}
        video_data = result.get("video")
        if not video_data:
            raise ValueError("No video returned from fal.ai API")

        video_url = video_data.get("url")
        if not video_url:
            raise ValueError("Video missing URL in fal.ai response")

        # "video/mp4" -> "mp4"; fall back to mp4 for malformed content types.
        content_type = video_data.get("content_type", "video/mp4")
        if "/" in content_type:
            video_format = content_type.split("/")[-1]
        else:
            video_format = "mp4"

        # Duration is reported at the top level of the API response.
        duration = result.get("duration")

        # The stored artifact reuses the input image dimensions — presumably the
        # avatar video keeps the source aspect ratio; TODO confirm the provider
        # does not rescale the output.
        artifact = await context.store_video_result(
            storage_url=video_url,
            format=video_format,
            width=inputs.image.width,
            height=inputs.image.height,
            duration=duration,
            fps=None,  # not provided by the API
            output_index=0,
        )

        return GeneratorResult(outputs=[artifact])

    async def estimate_cost(self, inputs: KlingVideoAiAvatarV2StandardInput) -> float:
        """Estimate cost in USD for one generation.

        Official pricing is not published; this flat figure is an estimate
        based on typical Kling video generation costs.
        """
        return 0.10
@@ -0,0 +1,180 @@
1
+ """
2
+ VEED Fabric 1.0 image-to-video generator.
3
+
4
+ Generate talking videos from any image using VEED Fabric 1.0.
5
+ This generator turns a static image into a talking video with synchronized
6
+ lip movements based on provided audio.
7
+
8
+ Based on Fal AI's veed/fabric-1.0 model.
9
+ See: https://fal.ai/models/veed/fabric-1.0
10
+ """
11
+
12
+ import os
13
+ from typing import Literal
14
+
15
+ from pydantic import BaseModel, Field
16
+
17
+ from ....artifacts import AudioArtifact, ImageArtifact
18
+ from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
19
+ from ..utils import upload_artifacts_to_fal
20
+
21
+
22
class VeedFabric10Input(BaseModel):
    """Validated request model for VEED Fabric 1.0.

    Fields typed as artifacts are discovered through type introspection by the
    framework and are resolved from generation IDs into concrete artifact
    objects before ``generate`` is called.
    """

    # NOTE(review): despite the *_url names (kept for API parity), these fields
    # hold artifact objects, not strings — the generator uploads them to obtain
    # actual URLs.
    image_url: ImageArtifact = Field(description="Image to turn into a talking video")
    audio_url: AudioArtifact = Field(description="Audio to synchronize with the image")
    resolution: Literal["720p", "480p"] = Field(
        default="720p",
        description="Output video resolution",
    )
34
+
35
+
36
class FalVeedFabric10Generator(BaseGenerator):
    """Generator for turning images into talking videos using VEED Fabric 1.0."""

    name = "veed-fabric-1.0"
    description = "VEED: Fabric 1.0 - Turn any image into a talking video"
    artifact_type = "video"

    def get_input_schema(self) -> type[VeedFabric10Input]:
        """Return the input schema for this generator."""
        return VeedFabric10Input

    @staticmethod
    def _infer_video_format(video_url: str, content_type: str) -> str:
        """Infer the container format of the result video.

        Fallback strategy (first match wins):
        1. A recognized file extension on the URL (query string ignored).
        2. The MIME subtype, when ``content_type`` is a ``video/*`` type.
        3. Default to ``"mp4"``.

        Fix over the previous inline logic: a URL that explicitly ends in
        ``.mp4`` was indistinguishable from the default and could be
        overridden by ``content_type``; extension now takes precedence.
        """
        if "." in video_url:
            ext = video_url.rsplit(".", 1)[-1].split("?")[0].lower()
            if ext in ("mp4", "webm", "mov", "avi"):
                return ext
        if content_type.startswith("video/"):
            return content_type.split("/")[-1]
        return "mp4"

    async def generate(
        self, inputs: VeedFabric10Input, context: GeneratorExecutionContext
    ) -> GeneratorResult:
        """Generate talking video using VEED Fabric 1.0."""
        # Fail fast on missing credentials rather than deep inside the SDK call.
        if not os.getenv("FAL_KEY"):
            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")

        # fal_client is an optional extra; give a clear install hint if absent.
        try:
            import fal_client
        except ImportError as e:
            raise ImportError(
                "fal.ai SDK is required for FalVeedFabric10Generator. "
                "Install with: pip install weirdfingers-boards[generators-fal]"
            ) from e

        # The Fal API only accepts publicly reachable URLs, so both artifacts
        # are first pushed to Fal's own storage.
        image_urls = await upload_artifacts_to_fal([inputs.image_url], context)
        audio_urls = await upload_artifacts_to_fal([inputs.audio_url], context)

        arguments = {
            "image_url": image_urls[0],
            "audio_url": audio_urls[0],
            "resolution": inputs.resolution,
        }

        handler = await fal_client.submit_async(
            "veed/fabric-1.0",
            arguments=arguments,
        )

        # Persist the provider-side request id for external job tracking.
        await context.set_external_job_id(handler.request_id)

        from .....progress.models import ProgressUpdate

        event_count = 0
        async for event in handler.iter_events(with_logs=True):
            event_count += 1
            # Forward only every third event to avoid flooding progress consumers.
            if event_count % 3 != 0:
                continue
            logs = getattr(event, "logs", None)
            if not logs:
                continue
            if isinstance(logs, list):
                message = " | ".join(str(log) for log in logs if log)
            else:
                message = str(logs)
            if message:
                await context.publish_progress(
                    ProgressUpdate(
                        job_id=handler.request_id,
                        status="processing",
                        progress=50.0,  # provider reports no percentage; use a midpoint
                        phase="processing",
                        message=message,
                    )
                )

        result = await handler.get()

        # Fabric 1.0 returns: {"video": {"url": "...", "content_type": "video/mp4", ...}}
        video_data = result.get("video")
        if not video_data:
            raise ValueError(
                "No video returned from VEED Fabric 1.0 API. "
                f"Response structure: {list(result.keys())}"
            )

        video_url = video_data.get("url")
        if not video_url:
            raise ValueError(
                f"Video missing URL in VEED response. Video data keys: {list(video_data.keys())}"
            )

        # video_url is guaranteed non-empty here (the raise above), so the old
        # redundant `if video_url:` guard is gone.
        video_format = self._infer_video_format(
            video_url, video_data.get("content_type", "")
        )

        # Map resolution to 16:9 pixel dimensions: 720p=1280x720, 480p=854x480.
        if inputs.resolution == "720p":
            output_width, output_height = 1280, 720
        else:
            output_width, output_height = 854, 480

        artifact = await context.store_video_result(
            storage_url=video_url,
            format=video_format,
            width=output_width,
            height=output_height,
            # Output length tracks the driving audio track.
            duration=inputs.audio_url.duration,
            fps=30.0,  # standard frame rate for talking videos
            output_index=0,
        )

        return GeneratorResult(outputs=[artifact])

    async def estimate_cost(self, inputs: VeedFabric10Input) -> float:
        """Estimate cost for VEED Fabric 1.0 generation in USD.

        Pricing not specified in documentation; this is a conservative
        $0.08-per-generation estimate based on typical AI video lipsync
        processing costs, to be updated when official pricing is available.
        """
        return 0.08
@@ -0,0 +1,190 @@
1
+ """
2
+ Google Veo 3.1 text-to-video generator.
3
+
4
+ The most advanced AI video generation model in the world by Google, featuring
5
+ audio generation capabilities and enhanced quality over Veo 3.
6
+
7
+ Based on Fal AI's fal-ai/veo3.1 model.
8
+ See: https://fal.ai/models/fal-ai/veo3.1
9
+ """
10
+
11
+ import os
12
+ from typing import Literal
13
+
14
+ from pydantic import BaseModel, Field
15
+
16
+ from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
17
+
18
+
19
class Veo31Input(BaseModel):
    """Request model for Google Veo 3.1 text-to-video generation."""

    # Core generation controls.
    prompt: str = Field(description="The text prompt describing the video you want to generate")
    aspect_ratio: Literal["9:16", "16:9"] = Field(
        default="16:9", description="Aspect ratio of the generated video"
    )
    duration: Literal["4s", "6s", "8s"] = Field(
        default="8s", description="Duration of the generated video"
    )
    resolution: Literal["720p", "1080p"] = Field(
        default="720p", description="Resolution of the generated video"
    )
    # Quality / safety toggles passed straight through to the API.
    generate_audio: bool = Field(
        default=True,
        description="Whether to generate audio for the video. If false, 50% less credits used",
    )
    enhance_prompt: bool = Field(
        default=True, description="Whether to enhance video generation quality"
    )
    auto_fix: bool = Field(
        default=True,
        description="Automatically attempt to rewrite prompts that fail content policy validation",
    )
    # Optional knobs; omitted from the API payload when None.
    seed: int | None = Field(
        default=None, description="Seed value for reproducible generation"
    )
    negative_prompt: str | None = Field(
        default=None, description="Guidance text to exclude elements from generation"
    )
55
+
56
+
57
class FalVeo31Generator(BaseGenerator):
    """Generator for text-to-video using Google Veo 3.1."""

    name = "fal-veo31"
    description = "Fal: Veo 3.1 - Google's most advanced AI video generation model with audio"
    artifact_type = "video"

    def get_input_schema(self) -> type[Veo31Input]:
        """Expose the pydantic model used to validate generation requests."""
        return Veo31Input

    async def generate(
        self, inputs: Veo31Input, context: GeneratorExecutionContext
    ) -> GeneratorResult:
        """Generate video using fal.ai veo3.1."""
        # Fail fast on missing credentials rather than deep inside the SDK call.
        if not os.getenv("FAL_KEY"):
            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")

        # fal_client is an optional extra; give a clear install hint if absent.
        try:
            import fal_client
        except ImportError as e:
            raise ImportError(
                "fal.ai SDK is required for FalVeo31Generator. "
                "Install with: pip install weirdfingers-boards[generators-fal]"
            ) from e

        payload = {
            "prompt": inputs.prompt,
            "aspect_ratio": inputs.aspect_ratio,
            "duration": inputs.duration,
            "resolution": inputs.resolution,
            "generate_audio": inputs.generate_audio,
            "enhance_prompt": inputs.enhance_prompt,
            "auto_fix": inputs.auto_fix,
        }

        # Optional knobs are only sent when explicitly provided.
        if inputs.seed is not None:
            payload["seed"] = inputs.seed
        if inputs.negative_prompt is not None:
            payload["negative_prompt"] = inputs.negative_prompt

        handler = await fal_client.submit_async(
            "fal-ai/veo3.1",
            arguments=payload,
        )

        # Persist the provider-side request id for external job tracking.
        await context.set_external_job_id(handler.request_id)

        from .....progress.models import ProgressUpdate

        seen = 0
        async for event in handler.iter_events(with_logs=True):
            seen += 1
            # Forward only every third event to avoid flooding progress consumers.
            if seen % 3 != 0:
                continue
            logs = getattr(event, "logs", None)
            if not logs:
                continue
            if isinstance(logs, list):
                message = " | ".join(str(entry) for entry in logs if entry)
            else:
                message = str(logs)
            if message:
                await context.publish_progress(
                    ProgressUpdate(
                        job_id=handler.request_id,
                        status="processing",
                        progress=50.0,  # provider reports no percentage; use a midpoint
                        phase="processing",
                        message=message,
                    )
                )

        result = await handler.get()

        # Expected shape: {"video": {"url": "...", "content_type": "...", ...}}
        video_data = result.get("video")
        if not video_data:
            raise ValueError("No video returned from fal.ai API")

        video_url = video_data.get("url")
        if not video_url:
            raise ValueError("Video missing URL in fal.ai response")

        # Pixel dimensions follow directly from the validated resolution and
        # aspect-ratio literals.
        dimensions = {
            ("720p", "16:9"): (1280, 720),
            ("720p", "9:16"): (720, 1280),
            ("1080p", "16:9"): (1920, 1080),
            ("1080p", "9:16"): (1080, 1920),
        }
        width, height = dimensions[(inputs.resolution, inputs.aspect_ratio)]

        # "8s" -> 8
        duration_seconds = int(inputs.duration.removesuffix("s"))

        artifact = await context.store_video_result(
            storage_url=video_url,
            format="mp4",
            width=width,
            height=height,
            duration=duration_seconds,
            output_index=0,
        )

        return GeneratorResult(outputs=[artifact])

    async def estimate_cost(self, inputs: Veo31Input) -> float:
        """Estimate cost for this generation in USD.

        Pricing information is not available in the Fal documentation; this
        is a placeholder to be replaced with actual pricing.
        """
        # TODO: Update with actual pricing from Fal when available.
        base_cost = 0.15  # placeholder estimate
        # Disabling audio generation halves the credit usage.
        return base_cost * 0.5 if not inputs.generate_audio else base_cost