@weirdfingers/baseboards 0.4.1 → 0.5.0

package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@weirdfingers/baseboards",
-  "version": "0.4.1",
+  "version": "0.5.0",
   "description": "One-command launcher for the Boards image generation application",
   "type": "module",
   "bin": {
@@ -38,12 +38,21 @@ generators:
   - class: "boards.generators.implementations.fal.image.nano_banana.FalNanoBananaGenerator"
     enabled: true
 
+  - class: "boards.generators.implementations.fal.image.nano_banana_pro.FalNanoBananaProGenerator"
+    enabled: true
+
   - class: "boards.generators.implementations.fal.video.sync_lipsync_v2.FalSyncLipsyncV2Generator"
     enabled: true
 
   - class: "boards.generators.implementations.fal.video.veo31_first_last_frame_to_video.FalVeo31FirstLastFrameToVideoGenerator"
     enabled: true
 
+  - class: "boards.generators.implementations.fal.video.veo31_image_to_video.FalVeo31ImageToVideoGenerator"
+    enabled: true
+
+  - class: "boards.generators.implementations.fal.video.veo31_reference_to_video.FalVeo31ReferenceToVideoGenerator"
+    enabled: true
+
   # OpenAI generators
   - class: "boards.generators.implementations.openai.image.dalle3.OpenAIDallE3Generator"
     enabled: true
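
Note: each "class:" entry above is a dotted import path ending in the class name. A minimal sketch (not part of the diff) of how such a path can be resolved with importlib; the helper name load_generator is hypothetical, and the actual launcher may load generators differently.

import importlib

def load_generator(dotted_path: str) -> type:
    # Split "pkg.module.ClassName" into module path and class name
    module_path, class_name = dotted_path.rsplit(".", 1)
    module = importlib.import_module(module_path)
    return getattr(module, class_name)

# Example:
# cls = load_generator(
#     "boards.generators.implementations.fal.image.nano_banana_pro.FalNanoBananaProGenerator"
# )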
@@ -3,7 +3,7 @@ Boards Backend SDK
 Open-source creative toolkit for AI-generated content
 """
 
-__version__ = "0.4.1"
+__version__ = "0.5.0"
 
 from .config import settings
 
@@ -4,7 +4,7 @@ Configuration management for Boards backend
 
 import os
 
-from pydantic_settings import BaseSettings
+from pydantic_settings import BaseSettings, SettingsConfigDict
 
 
 class Settings(BaseSettings):
@@ -85,13 +85,13 @@ class Settings(BaseSettings):
         ".json",  # Text
     ]
 
-    class Config:
-        env_file = ".env"
-        env_prefix = "BOARDS_"
-        case_sensitive = False
-
+    model_config = SettingsConfigDict(
+        env_file=".env",
+        env_prefix="BOARDS_",
+        case_sensitive=False,
         # Allow extra fields for provider-specific configs
-        extra = "allow"
+        extra="allow",
+    )
 
 
 # Global settings instance
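
Note: this hunk migrates from pydantic v1's inner "class Config" to pydantic-settings v2's model_config = SettingsConfigDict(...), which is required because pydantic v2 no longer reads the inner class. A minimal standalone sketch of the same pattern; the field name here is illustrative, not from the package.

from pydantic_settings import BaseSettings, SettingsConfigDict

class ExampleSettings(BaseSettings):
    # Resolved from BOARDS_DATABASE_URL (or .env) because of env_prefix below
    database_url: str = "sqlite:///boards.db"

    model_config = SettingsConfigDict(
        env_file=".env",
        env_prefix="BOARDS_",
        case_sensitive=False,
        extra="allow",  # tolerate provider-specific extra fields
    )

# settings = ExampleSettings()  # reads the environment and .env at instantiation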
@@ -6,6 +6,7 @@ from .imagen4_preview import FalImagen4PreviewGenerator
 from .imagen4_preview_fast import FalImagen4PreviewFastGenerator
 from .nano_banana import FalNanoBananaGenerator
 from .nano_banana_edit import FalNanoBananaEditGenerator
+from .nano_banana_pro import FalNanoBananaProGenerator
 
 __all__ = [
     "FalFluxProKontextGenerator",
@@ -14,4 +15,5 @@ __all__ = [
     "FalImagen4PreviewFastGenerator",
     "FalNanoBananaGenerator",
     "FalNanoBananaEditGenerator",
+    "FalNanoBananaProGenerator",
 ]
@@ -0,0 +1,179 @@
+"""
+fal.ai nano-banana-pro text-to-image generator.
+
+State-of-the-art image generation using Google's latest model, specializing in
+realism and typography applications.
+
+See: https://fal.ai/models/fal-ai/nano-banana-pro
+"""
+
+import os
+from typing import Literal
+
+from pydantic import BaseModel, Field
+
+from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+
+
+class NanoBananaProInput(BaseModel):
+    """Input schema for nano-banana-pro image generation."""
+
+    prompt: str = Field(
+        min_length=3,
+        max_length=50000,
+        description="The text prompt to generate an image from",
+    )
+    aspect_ratio: Literal[
+        "21:9",
+        "16:9",
+        "3:2",
+        "4:3",
+        "5:4",
+        "1:1",
+        "4:5",
+        "3:4",
+        "2:3",
+        "9:16",
+    ] = Field(
+        default="1:1",
+        description="Image aspect ratio",
+    )
+    num_images: int = Field(
+        default=1,
+        ge=1,
+        le=4,
+        description="Number of images to generate in batch",
+    )
+    resolution: Literal["1K", "2K", "4K"] = Field(
+        default="1K",
+        description="Image resolution (1K, 2K, or 4K)",
+    )
+    output_format: Literal["jpeg", "png", "webp"] = Field(
+        default="png",
+        description="Output image format",
+    )
+    sync_mode: bool = Field(
+        default=True,
+        description="Use synchronous mode (wait for completion)",
+    )
+
+
+class FalNanoBananaProGenerator(BaseGenerator):
+    """nano-banana-pro image generator using fal.ai.
+
+    Google's state-of-the-art image generation and editing model, specializing
+    in realism and typography applications.
+    """
+
+    name = "fal-nano-banana-pro"
+    artifact_type = "image"
+    description = (
+        "Fal: nano-banana-pro - Google's state-of-the-art image generation "
+        "with excellent realism and typography"
+    )
+
+    def get_input_schema(self) -> type[NanoBananaProInput]:
+        return NanoBananaProInput
+
+    async def generate(
+        self, inputs: NanoBananaProInput, context: GeneratorExecutionContext
+    ) -> GeneratorResult:
+        """Generate images using fal.ai nano-banana-pro model."""
+        # Check for API key (fal-client uses FAL_KEY environment variable)
+        if not os.getenv("FAL_KEY"):
+            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
+
+        # Import fal_client
+        try:
+            import fal_client
+        except ImportError as e:
+            raise ImportError(
+                "fal.ai SDK is required for NanoBananaProGenerator. "
+                "Install with: pip install weirdfingers-boards[generators-fal]"
+            ) from e
+
+        # Prepare arguments for fal.ai API
+        arguments = {
+            "prompt": inputs.prompt,
+            "aspect_ratio": inputs.aspect_ratio,
+            "num_images": inputs.num_images,
+            "resolution": inputs.resolution,
+            "output_format": inputs.output_format,
+            "sync_mode": inputs.sync_mode,
+        }
+
+        # Submit async job and get handler
+        handler = await fal_client.submit_async(
+            "fal-ai/nano-banana-pro",
+            arguments=arguments,
+        )
+
+        # Store the external job ID for tracking
+        await context.set_external_job_id(handler.request_id)
+
+        # Stream progress updates (sample every 3rd event to avoid spam)
+        from .....progress.models import ProgressUpdate
+
+        event_count = 0
+        async for event in handler.iter_events(with_logs=True):
+            event_count += 1
+
+            # Process every 3rd event to provide feedback without overwhelming
+            if event_count % 3 == 0:
+                # Extract logs if available
+                logs = getattr(event, "logs", None)
+                if logs:
+                    # Join log entries into a single message
+                    if isinstance(logs, list):
+                        message = " | ".join(str(log) for log in logs if log)
+                    else:
+                        message = str(logs)
+
+                    if message:
+                        await context.publish_progress(
+                            ProgressUpdate(
+                                job_id=handler.request_id,
+                                status="processing",
+                                progress=50.0,  # Approximate mid-point progress
+                                phase="processing",
+                                message=message,
+                            )
+                        )
+
+        # Get final result
+        result = await handler.get()
+
+        # Extract image URLs from result
+        # fal.ai returns: {"images": [{"url": "...", "width": ..., "height": ...}, ...]}
+        images = result.get("images", [])
+        if not images:
+            raise ValueError("No images returned from fal.ai API")
+
+        # Store each image using output_index
+        artifacts = []
+        for idx, image_data in enumerate(images):
+            image_url = image_data.get("url")
+            width = image_data.get("width")
+            height = image_data.get("height")
+
+            if not image_url:
+                raise ValueError(f"Image {idx} missing URL in fal.ai response")
+
+            # Store with appropriate output_index
+            artifact = await context.store_image_result(
+                storage_url=image_url,
+                format=inputs.output_format,
+                width=width,
+                height=height,
+                output_index=idx,
+            )
+            artifacts.append(artifact)
+
+        return GeneratorResult(outputs=artifacts)
+
+    async def estimate_cost(self, inputs: NanoBananaProInput) -> float:
+        """Estimate cost for nano-banana-pro generation.
+
+        nano-banana-pro is a premium model costing approximately $0.039 per image.
+        """
+        return 0.039 * inputs.num_images  # $0.039 per image, scaled by batch size
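
Note: a quick sketch (not part of the diff) exercising the new input schema locally, assuming the boards package is installed; no API call is made. The module path matches the "class:" entry in the config hunk above.

from boards.generators.implementations.fal.image.nano_banana_pro import NanoBananaProInput

inputs = NanoBananaProInput(prompt="a hand-lettered café sign", num_images=4, resolution="2K")
print(inputs.aspect_ratio)        # "1:1" (default)
print(0.039 * inputs.num_images)  # 0.156 -> what estimate_cost() returns for a 4-image batch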
@@ -5,9 +5,13 @@ from .kling_video_v2_5_turbo_pro_text_to_video import (
 )
 from .sync_lipsync_v2 import FalSyncLipsyncV2Generator
 from .veo31_first_last_frame_to_video import FalVeo31FirstLastFrameToVideoGenerator
+from .veo31_image_to_video import FalVeo31ImageToVideoGenerator
+from .veo31_reference_to_video import FalVeo31ReferenceToVideoGenerator
 
 __all__ = [
     "FalKlingVideoV25TurboProTextToVideoGenerator",
     "FalSyncLipsyncV2Generator",
     "FalVeo31FirstLastFrameToVideoGenerator",
+    "FalVeo31ImageToVideoGenerator",
+    "FalVeo31ReferenceToVideoGenerator",
 ]
@@ -0,0 +1,183 @@
+"""
+Google Veo 3.1 image-to-video generator.
+
+Converts static images into animated videos based on text prompts using
+Google's Veo 3.1 technology via fal.ai.
+
+Based on Fal AI's fal-ai/veo3.1/image-to-video model.
+See: https://fal.ai/models/fal-ai/veo3.1/image-to-video
+"""
+
+import os
+from typing import Literal
+
+from pydantic import BaseModel, Field
+
+from ....artifacts import ImageArtifact
+from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+
+
+class Veo31ImageToVideoInput(BaseModel):
+    """Input schema for Veo 3.1 image-to-video generation.
+
+    Artifact fields (image) are automatically detected via type introspection
+    and resolved from generation IDs to ImageArtifact objects.
+    """
+
+    prompt: str = Field(description="Text prompt describing the desired video content and motion")
+    image: ImageArtifact = Field(
+        description="Input image to animate. Should be 720p or higher in 16:9 or 9:16 aspect ratio"
+    )
+    aspect_ratio: Literal["9:16", "16:9"] = Field(
+        default="16:9",
+        description="Aspect ratio of the generated video",
+    )
+    duration: Literal["4s", "6s", "8s"] = Field(
+        default="8s",
+        description="Duration of the generated video in seconds",
+    )
+    generate_audio: bool = Field(
+        default=True,
+        description="Whether to generate audio for the video. Disabling uses 50% fewer credits",
+    )
+    resolution: Literal["720p", "1080p"] = Field(
+        default="720p",
+        description="Resolution of the generated video",
+    )
+
+
+class FalVeo31ImageToVideoGenerator(BaseGenerator):
+    """Generator for creating videos from static images using Google Veo 3.1."""
+
+    name = "fal-veo31-image-to-video"
+    description = "Fal: Veo 3.1 - Convert images to videos with text-guided animation"
+    artifact_type = "video"
+
+    def get_input_schema(self) -> type[Veo31ImageToVideoInput]:
+        """Return the input schema for this generator."""
+        return Veo31ImageToVideoInput
+
+    async def generate(
+        self, inputs: Veo31ImageToVideoInput, context: GeneratorExecutionContext
+    ) -> GeneratorResult:
+        """Generate video using fal.ai veo3.1/image-to-video."""
+        # Check for API key
+        if not os.getenv("FAL_KEY"):
+            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
+
+        # Import fal_client
+        try:
+            import fal_client
+        except ImportError as e:
+            raise ImportError(
+                "fal.ai SDK is required for FalVeo31ImageToVideoGenerator. "
+                "Install with: pip install weirdfingers-boards[generators-fal]"
+            ) from e
+
+        # Upload image artifact to Fal's public storage
+        # Fal API requires publicly accessible URLs, but our storage_url might be:
+        # - Localhost URLs (not publicly accessible)
+        # - Private S3 buckets (not publicly accessible)
+        # So we upload to Fal's temporary storage first
+        from ..utils import upload_artifacts_to_fal
+
+        image_urls = await upload_artifacts_to_fal([inputs.image], context)
+
+        # Prepare arguments for fal.ai API
+        arguments = {
+            "prompt": inputs.prompt,
+            "image_url": image_urls[0],
+            "aspect_ratio": inputs.aspect_ratio,
+            "duration": inputs.duration,
+            "generate_audio": inputs.generate_audio,
+            "resolution": inputs.resolution,
+        }
+
+        # Submit async job
+        handler = await fal_client.submit_async(
+            "fal-ai/veo3.1/image-to-video",
+            arguments=arguments,
+        )
+
+        # Store external job ID
+        await context.set_external_job_id(handler.request_id)
+
+        # Stream progress updates
+        from .....progress.models import ProgressUpdate
+
+        event_count = 0
+        async for event in handler.iter_events(with_logs=True):
+            event_count += 1
+            # Sample every 3rd event to avoid spam
+            if event_count % 3 == 0:
+                # Extract logs if available
+                logs = getattr(event, "logs", None)
+                if logs:
+                    # Join log entries into a single message
+                    if isinstance(logs, list):
+                        message = " | ".join(str(log) for log in logs if log)
+                    else:
+                        message = str(logs)
+
+                    if message:
+                        await context.publish_progress(
+                            ProgressUpdate(
+                                job_id=handler.request_id,
+                                status="processing",
+                                progress=50.0,
+                                phase="processing",
+                                message=message,
+                            )
+                        )
+
+        # Get final result
+        result = await handler.get()
+
+        # Extract video from result
+        # Expected structure: {"video": {"url": "...", "content_type": "...", ...}}
+        video_data = result.get("video")
+        if not video_data:
+            raise ValueError("No video returned from fal.ai API")
+
+        video_url = video_data.get("url")
+        if not video_url:
+            raise ValueError("Video missing URL in fal.ai response")
+
+        # Calculate video dimensions based on resolution and aspect ratio
+        if inputs.resolution == "720p":
+            if inputs.aspect_ratio == "16:9":
+                width, height = 1280, 720
+            else:  # 9:16
+                width, height = 720, 1280
+        else:  # 1080p
+            if inputs.aspect_ratio == "16:9":
+                width, height = 1920, 1080
+            else:  # 9:16
+                width, height = 1080, 1920
+
+        # Parse duration from "Xs" format
+        duration_seconds = int(inputs.duration.rstrip("s"))
+
+        artifact = await context.store_video_result(
+            storage_url=video_url,
+            format="mp4",
+            width=width,
+            height=height,
+            duration=duration_seconds,
+            output_index=0,
+        )
+
+        return GeneratorResult(outputs=[artifact])
+
+    async def estimate_cost(self, inputs: Veo31ImageToVideoInput) -> float:
+        """Estimate cost for this generation in USD.
+
+        Note: Pricing information not available in Fal documentation.
+        Using placeholder value that should be updated with actual pricing.
+        """
+        # TODO: Update with actual pricing from Fal when available
+        # Base cost, with 50% reduction if audio is disabled
+        base_cost = 0.15  # Placeholder estimate
+        if not inputs.generate_audio:
+            return base_cost * 0.5
+        return base_cost
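
Note: the resolution/aspect-ratio branching in the generator above amounts to a small lookup; an equivalent, self-contained table-driven sketch (illustrative only, the diff keeps the explicit if/else form):

# Map (resolution, aspect_ratio) to (width, height) pixel dimensions
DIMENSIONS = {
    ("720p", "16:9"): (1280, 720),
    ("720p", "9:16"): (720, 1280),
    ("1080p", "16:9"): (1920, 1080),
    ("1080p", "9:16"): (1080, 1920),
}

width, height = DIMENSIONS[("720p", "9:16")]
print(width, height)  # 720 1280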
@@ -0,0 +1,172 @@
+"""
+Google Veo 3.1 reference-to-video generator.
+
+Generates videos from multiple reference images to maintain consistent subject
+appearance while creating dynamic video content based on text prompts.
+
+Based on Fal AI's fal-ai/veo3.1/reference-to-video model.
+See: https://fal.ai/models/fal-ai/veo3.1/reference-to-video
+"""
+
+import os
+from typing import Literal
+
+from pydantic import BaseModel, Field
+
+from ....artifacts import ImageArtifact
+from ....base import BaseGenerator, GeneratorExecutionContext, GeneratorResult
+
+
+class Veo31ReferenceToVideoInput(BaseModel):
+    """Input schema for Veo 3.1 reference-to-video generation.
+
+    Artifact fields (image_urls) are automatically detected via type
+    introspection and resolved from generation IDs to ImageArtifact objects.
+    """
+
+    image_urls: list[ImageArtifact] = Field(
+        description="URLs of reference images for consistent subject appearance"
+    )
+    prompt: str = Field(description="Text description of desired video content")
+    duration: Literal["8s"] = Field(
+        default="8s",
+        description="Duration of the generated video in seconds (currently only 8s is supported)",
+    )
+    resolution: Literal["720p", "1080p"] = Field(
+        default="720p",
+        description="Resolution of the generated video",
+    )
+    generate_audio: bool = Field(
+        default=True,
+        description="Whether to generate audio for the video. Disabling uses 50% fewer credits",
+    )
+
+
+class FalVeo31ReferenceToVideoGenerator(BaseGenerator):
+    """Generator for creating videos from reference images using Google Veo 3.1."""
+
+    name = "fal-veo31-reference-to-video"
+    description = "Fal: Veo 3.1 - Generate videos from reference images with consistent subjects"
+    artifact_type = "video"
+
+    def get_input_schema(self) -> type[Veo31ReferenceToVideoInput]:
+        """Return the input schema for this generator."""
+        return Veo31ReferenceToVideoInput
+
+    async def generate(
+        self, inputs: Veo31ReferenceToVideoInput, context: GeneratorExecutionContext
+    ) -> GeneratorResult:
+        """Generate video using fal.ai veo3.1/reference-to-video."""
+        # Check for API key
+        if not os.getenv("FAL_KEY"):
+            raise ValueError("API configuration invalid. Missing FAL_KEY environment variable")
+
+        # Import fal_client
+        try:
+            import fal_client
+        except ImportError as e:
+            raise ImportError(
+                "fal.ai SDK is required for FalVeo31ReferenceToVideoGenerator. "
+                "Install with: pip install weirdfingers-boards[generators-fal]"
+            ) from e
+
+        # Upload image artifacts to Fal's public storage
+        # Fal API requires publicly accessible URLs, but our storage_url might be:
+        # - Localhost URLs (not publicly accessible)
+        # - Private S3 buckets (not publicly accessible)
+        # So we upload to Fal's temporary storage first
+        from ..utils import upload_artifacts_to_fal
+
+        reference_image_urls = await upload_artifacts_to_fal(inputs.image_urls, context)
+
+        # Prepare arguments for fal.ai API
+        arguments = {
+            "image_urls": reference_image_urls,
+            "prompt": inputs.prompt,
+            "duration": inputs.duration,
+            "resolution": inputs.resolution,
+            "generate_audio": inputs.generate_audio,
+        }
+
+        # Submit async job
+        handler = await fal_client.submit_async(
+            "fal-ai/veo3.1/reference-to-video",
+            arguments=arguments,
+        )
+
+        # Store external job ID
+        await context.set_external_job_id(handler.request_id)
+
+        # Stream progress updates
+        from .....progress.models import ProgressUpdate
+
+        event_count = 0
+        async for event in handler.iter_events(with_logs=True):
+            event_count += 1
+            # Sample every 3rd event to avoid spam
+            if event_count % 3 == 0:
+                # Extract logs if available
+                logs = getattr(event, "logs", None)
+                if logs:
+                    # Join log entries into a single message
+                    if isinstance(logs, list):
+                        message = " | ".join(str(log) for log in logs if log)
+                    else:
+                        message = str(logs)
+
+                    if message:
+                        await context.publish_progress(
+                            ProgressUpdate(
+                                job_id=handler.request_id,
+                                status="processing",
+                                progress=50.0,
+                                phase="processing",
+                                message=message,
+                            )
+                        )
+
+        # Get final result
+        result = await handler.get()
+
+        # Extract video from result
+        # Expected structure: {"video": {"url": "...", "content_type": "...", ...}}
+        video_data = result.get("video")
+        if not video_data:
+            raise ValueError("No video returned from fal.ai API")
+
+        video_url = video_data.get("url")
+        if not video_url:
+            raise ValueError("Video missing URL in fal.ai response")
+
+        # Store video result
+        # Note: Fal API doesn't provide video dimensions/duration in the response,
+        # so we'll use defaults based on input parameters
+        width = 1280 if inputs.resolution == "720p" else 1920
+        height = 720 if inputs.resolution == "720p" else 1080
+
+        # Parse duration from "8s" format
+        duration_seconds = int(inputs.duration.rstrip("s"))
+
+        artifact = await context.store_video_result(
+            storage_url=video_url,
+            format="mp4",
+            width=width,
+            height=height,
+            duration=duration_seconds,
+            output_index=0,
+        )
+
+        return GeneratorResult(outputs=[artifact])
+
+    async def estimate_cost(self, inputs: Veo31ReferenceToVideoInput) -> float:
+        """Estimate cost for this generation in USD.
+
+        Note: Pricing information not available in Fal documentation.
+        Using placeholder value that should be updated with actual pricing.
+        """
+        # TODO: Update with actual pricing from Fal when available
+        # Base cost, with 50% reduction if audio is disabled
+        base_cost = 0.15  # Placeholder estimate
+        if not inputs.generate_audio:
+            return base_cost * 0.5
+        return base_cost
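
Note: both new Veo 3.1 generators share the same placeholder cost model, a $0.15 base estimate halved when audio generation is disabled. A self-contained restatement of that arithmetic:

def veo31_estimated_cost(generate_audio: bool, base_cost: float = 0.15) -> float:
    # 50% discount when audio generation is turned off
    return base_cost * (0.5 if not generate_audio else 1.0)

print(veo31_estimated_cost(True))   # 0.15
print(veo31_estimated_cost(False))  # 0.075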
@@ -115,7 +115,7 @@ async def create_batch_generation(
     input_params: dict,
     batch_id: str,
     batch_index: int,
-) -> Generations:
+) -> str:
     """Create a batch generation record for multi-output generators.
 
     This creates a new generation record that is part of a batch, with
@@ -133,7 +133,7 @@ async def create_batch_generation(
         batch_index: Index of this output in the batch
 
     Returns:
-        Created generation record
+        ID of the created generation record
    """
    gen = Generations()
    gen.tenant_id = tenant_id
@@ -150,4 +150,4 @@ async def create_batch_generation(
    }
    session.add(gen)
    await session.flush()
-    return gen
+    return str(gen.id)
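
Note: returning str(gen.id), captured after flush() while the session is live, instead of the ORM instance means callers never touch an expired/detached object after the session closes; the context.py hunks below rely on this. A self-contained illustration of the hazard with generic SQLAlchemy 2.x async models (not boards code; assumes sqlalchemy>=2 and aiosqlite are installed):

import asyncio

from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


class Base(DeclarativeBase):
    pass


class Gen(Base):
    __tablename__ = "gens"
    id: Mapped[int] = mapped_column(primary_key=True)


async def main() -> None:
    engine = create_async_engine("sqlite+aiosqlite://")
    async with engine.begin() as conn:
        await conn.run_sync(Base.metadata.create_all)

    async with AsyncSession(engine) as session:
        gen = Gen()
        session.add(gen)
        await session.flush()   # populates gen.id
        gen_id = str(gen.id)    # capture while the session is live
        await session.commit()
    # `gen` is now expired and detached; accessing gen.id here could raise,
    # because the refresh it triggers cannot run without a live AsyncSession.
    print(gen_id)               # the plain string stays valid


asyncio.run(main())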
@@ -6,7 +6,12 @@ from uuid import UUID, uuid4
 
 from ..database.connection import get_async_session
 from ..generators import resolution
-from ..generators.artifacts import AudioArtifact, ImageArtifact, TextArtifact, VideoArtifact
+from ..generators.artifacts import (
+    AudioArtifact,
+    ImageArtifact,
+    TextArtifact,
+    VideoArtifact,
+)
 from ..jobs import repository as jobs_repo
 from ..logging import get_logger
 from ..progress.models import ProgressUpdate
@@ -319,7 +324,7 @@ class GeneratorExecutionContext:
 
         # Create new batch generation record
         async with get_async_session() as session:
-            batch_gen = await jobs_repo.create_batch_generation(
+            batch_gen_id = await jobs_repo.create_batch_generation(
                 session,
                 tenant_id=UUID(self.tenant_id),
                 board_id=UUID(self.board_id),
@@ -331,7 +336,6 @@ class GeneratorExecutionContext:
                 batch_index=output_index,
             )
             await session.commit()
-            batch_gen_id = str(batch_gen.id)
 
         self._batch_generations.append(batch_gen_id)
         logger.info(