yamlgraph 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of yamlgraph might be problematic. Click here for more details.
- examples/__init__.py +1 -0
- examples/storyboard/__init__.py +1 -0
- examples/storyboard/generate_videos.py +335 -0
- examples/storyboard/nodes/__init__.py +10 -0
- examples/storyboard/nodes/animated_character_node.py +248 -0
- examples/storyboard/nodes/animated_image_node.py +138 -0
- examples/storyboard/nodes/character_node.py +162 -0
- examples/storyboard/nodes/image_node.py +118 -0
- examples/storyboard/nodes/replicate_tool.py +238 -0
- examples/storyboard/retry_images.py +118 -0
- tests/__init__.py +1 -0
- tests/conftest.py +178 -0
- tests/integration/__init__.py +1 -0
- tests/integration/test_animated_storyboard.py +63 -0
- tests/integration/test_cli_commands.py +242 -0
- tests/integration/test_map_demo.py +50 -0
- tests/integration/test_memory_demo.py +281 -0
- tests/integration/test_pipeline_flow.py +105 -0
- tests/integration/test_providers.py +163 -0
- tests/integration/test_resume.py +75 -0
- tests/unit/__init__.py +1 -0
- tests/unit/test_agent_nodes.py +200 -0
- tests/unit/test_checkpointer.py +212 -0
- tests/unit/test_cli.py +121 -0
- tests/unit/test_cli_package.py +81 -0
- tests/unit/test_compile_graph_map.py +132 -0
- tests/unit/test_conditions_routing.py +253 -0
- tests/unit/test_config.py +93 -0
- tests/unit/test_conversation_memory.py +270 -0
- tests/unit/test_database.py +145 -0
- tests/unit/test_deprecation.py +104 -0
- tests/unit/test_executor.py +60 -0
- tests/unit/test_executor_async.py +179 -0
- tests/unit/test_export.py +150 -0
- tests/unit/test_expressions.py +178 -0
- tests/unit/test_format_prompt.py +145 -0
- tests/unit/test_generic_report.py +200 -0
- tests/unit/test_graph_commands.py +327 -0
- tests/unit/test_graph_loader.py +299 -0
- tests/unit/test_graph_schema.py +193 -0
- tests/unit/test_inline_schema.py +151 -0
- tests/unit/test_issues.py +164 -0
- tests/unit/test_jinja2_prompts.py +85 -0
- tests/unit/test_langsmith.py +319 -0
- tests/unit/test_llm_factory.py +109 -0
- tests/unit/test_llm_factory_async.py +118 -0
- tests/unit/test_loops.py +403 -0
- tests/unit/test_map_node.py +144 -0
- tests/unit/test_no_backward_compat.py +56 -0
- tests/unit/test_node_factory.py +225 -0
- tests/unit/test_prompts.py +166 -0
- tests/unit/test_python_nodes.py +198 -0
- tests/unit/test_reliability.py +298 -0
- tests/unit/test_result_export.py +234 -0
- tests/unit/test_router.py +296 -0
- tests/unit/test_sanitize.py +99 -0
- tests/unit/test_schema_loader.py +295 -0
- tests/unit/test_shell_tools.py +229 -0
- tests/unit/test_state_builder.py +331 -0
- tests/unit/test_state_builder_map.py +104 -0
- tests/unit/test_state_config.py +197 -0
- tests/unit/test_template.py +190 -0
- tests/unit/test_tool_nodes.py +129 -0
- yamlgraph/__init__.py +35 -0
- yamlgraph/builder.py +110 -0
- yamlgraph/cli/__init__.py +139 -0
- yamlgraph/cli/__main__.py +6 -0
- yamlgraph/cli/commands.py +232 -0
- yamlgraph/cli/deprecation.py +92 -0
- yamlgraph/cli/graph_commands.py +382 -0
- yamlgraph/cli/validators.py +37 -0
- yamlgraph/config.py +67 -0
- yamlgraph/constants.py +66 -0
- yamlgraph/error_handlers.py +226 -0
- yamlgraph/executor.py +275 -0
- yamlgraph/executor_async.py +122 -0
- yamlgraph/graph_loader.py +337 -0
- yamlgraph/map_compiler.py +138 -0
- yamlgraph/models/__init__.py +36 -0
- yamlgraph/models/graph_schema.py +141 -0
- yamlgraph/models/schemas.py +124 -0
- yamlgraph/models/state_builder.py +236 -0
- yamlgraph/node_factory.py +240 -0
- yamlgraph/routing.py +87 -0
- yamlgraph/schema_loader.py +160 -0
- yamlgraph/storage/__init__.py +17 -0
- yamlgraph/storage/checkpointer.py +72 -0
- yamlgraph/storage/database.py +320 -0
- yamlgraph/storage/export.py +269 -0
- yamlgraph/tools/__init__.py +1 -0
- yamlgraph/tools/agent.py +235 -0
- yamlgraph/tools/nodes.py +124 -0
- yamlgraph/tools/python_tool.py +178 -0
- yamlgraph/tools/shell.py +205 -0
- yamlgraph/utils/__init__.py +47 -0
- yamlgraph/utils/conditions.py +157 -0
- yamlgraph/utils/expressions.py +111 -0
- yamlgraph/utils/langsmith.py +308 -0
- yamlgraph/utils/llm_factory.py +118 -0
- yamlgraph/utils/llm_factory_async.py +105 -0
- yamlgraph/utils/logging.py +127 -0
- yamlgraph/utils/prompts.py +116 -0
- yamlgraph/utils/sanitize.py +98 -0
- yamlgraph/utils/template.py +102 -0
- yamlgraph/utils/validators.py +181 -0
- yamlgraph-0.1.1.dist-info/METADATA +854 -0
- yamlgraph-0.1.1.dist-info/RECORD +111 -0
- yamlgraph-0.1.1.dist-info/WHEEL +5 -0
- yamlgraph-0.1.1.dist-info/entry_points.txt +2 -0
- yamlgraph-0.1.1.dist-info/licenses/LICENSE +21 -0
- yamlgraph-0.1.1.dist-info/top_level.txt +3 -0
examples/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Examples package - sample projects demonstrating framework features."""
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Storyboard example - demonstrates Python node for image generation."""
|
|
@@ -0,0 +1,335 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Generate video clips from consecutive image pairs.
|
|
3
|
+
|
|
4
|
+
Takes a folder of images, sorts alphabetically, and generates
|
|
5
|
+
video clips for each consecutive pair (1→2, 2→3, 3→4, etc.)
|
|
6
|
+
|
|
7
|
+
Usage:
|
|
8
|
+
python examples/storyboard/generate_videos.py outputs/storyboard/20260117_112419/animated
|
|
9
|
+
|
|
10
|
+
Options:
|
|
11
|
+
--pattern GLOB File pattern to match (default: *.png)
|
|
12
|
+
--prompt TEXT Prompt for video generation
|
|
13
|
+
--fps INT Frames per second (default: 16)
|
|
14
|
+
--frames INT Number of frames (default: 81)
|
|
15
|
+
--resolution STR Resolution: 480p, 720p (default: 480p)
|
|
16
|
+
--dry-run Show what would be generated without running
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
from __future__ import annotations
|
|
20
|
+
|
|
21
|
+
import argparse
|
|
22
|
+
import json
|
|
23
|
+
import logging
|
|
24
|
+
import os
|
|
25
|
+
import subprocess
|
|
26
|
+
import sys
|
|
27
|
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
|
28
|
+
from pathlib import Path
|
|
29
|
+
|
|
30
|
+
# Root logging config for this script: INFO level, bare message format
# (output is meant to read like progress lines, not log records).
logging.basicConfig(level=logging.INFO, format="%(message)s")
logger = logging.getLogger(__name__)

# Check if replicate is available. The import is optional so the script can
# still start (e.g. for --dry-run); generation reports a clear error instead
# of crashing at import time.
try:
    import replicate

    REPLICATE_AVAILABLE = True
except ImportError:
    REPLICATE_AVAILABLE = False
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def generate_video_clip(
    first_image: Path,
    last_image: Path,
    output_path: Path,
    prompt: str = "",
    fps: int = 16,
    num_frames: int = 81,
    resolution: str = "480p",
) -> bool:
    """Render one video clip that transitions between two still frames.

    Sends both frames to the Replicate-hosted wan-2.2-i2v-fast model and
    writes the resulting MP4 to *output_path*.

    Args:
        first_image: Starting frame
        last_image: Ending frame
        output_path: Where to save the video
        prompt: Optional prompt describing the motion
        fps: Frames per second
        num_frames: Total number of frames
        resolution: Output resolution (480p, 720p)

    Returns:
        True if successful
    """
    # Fail fast when the optional dependency or the API credential is absent.
    if not REPLICATE_AVAILABLE:
        logger.error("replicate package not installed. Run: pip install replicate")
        return False

    token = os.environ.get("REPLICATE_API_TOKEN")
    if not token:
        logger.error("REPLICATE_API_TOKEN not set")
        return False

    try:
        logger.info(f"🎬 Generating: {first_image.name} → {last_image.name}")

        api = replicate.Client(api_token=token)

        # Keep both frame handles open for the duration of the API call.
        with open(first_image, "rb") as start_fh, open(last_image, "rb") as end_fh:
            payload = {
                "image": start_fh,
                "last_image": end_fh,
                "prompt": prompt or "Smooth camera motion, cinematic transition",
                "go_fast": True,
                "num_frames": num_frames,
                "resolution": resolution,
                "sample_shift": 12,
                "frames_per_second": fps,
                "interpolate_output": False,
                "lora_scale_transformer": 1,
                "lora_scale_transformer_2": 1,
            }
            result = api.run("wan-video/wan-2.2-i2v-fast", input=payload)

        # Persist the rendered clip.
        output_path.parent.mkdir(parents=True, exist_ok=True)
        with open(output_path, "wb") as out_fh:
            out_fh.write(result.read())

        logger.info(f"✓ Saved: {output_path}")
        return True

    except Exception as e:
        logger.error(f"✗ Failed: {e}")
        return False
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
def get_prompt_for_pair(
    metadata: dict | None,
    img1_name: str,
    img2_name: str,
) -> str:
    """Look up the motion prompt for a transition between two frames.

    Matches the starting image's filename against panel metadata (filenames
    follow the node's naming scheme, e.g. ``panel_1_first_frame.png``) and
    returns the prompt associated with the frame being transitioned into.

    Args:
        metadata: Parsed ``animated_character_story.json`` contents, or None.
        img1_name: Filename of the transition's starting frame.
        img2_name: Filename of the transition's ending frame.

    Returns:
        The matching prompt string, or "" when no panel/transition applies
        (including cross-panel transitions, which intentionally get "").
    """
    if not metadata:
        return ""

    # Parse panel number from filename
    # e.g., "panel_1_first_frame.png" → panel 1
    for panel in metadata.get("panels", []):
        prompts = panel.get("prompts", {})

        # Match "panel_<n>_" including the trailing underscore: a bare
        # "panel_1" substring test would also match panel_10, panel_11, ...
        if f"panel_{panel['index']}_" in img1_name:
            if "first_frame" in img1_name and "original" in img2_name:
                return prompts.get("original", "")
            if "original" in img1_name and "last_frame" in img2_name:
                return prompts.get("last_frame", "")
            if "first_frame" in img1_name and "last_frame" in img2_name:
                return prompts.get("last_frame", "")

    return ""
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
def concatenate_videos(video_paths: list[Path], output_path: Path) -> bool:
    """Concatenate multiple video clips into one using ffmpeg.

    Writes a temporary ffmpeg concat list next to *output_path*, then runs
    ffmpeg with stream copy (no re-encode). The list file is always removed,
    even when ffmpeg fails or is not installed.

    Args:
        video_paths: List of video files in order
        output_path: Output file path

    Returns:
        True if successful
    """
    if not video_paths:
        return False

    # Create concat file list in the same directory; clip entries use bare
    # filenames because ffmpeg runs with cwd set to that directory.
    concat_file = output_path.parent / "concat_list.txt"
    try:
        with open(concat_file, "w") as f:
            for video in video_paths:
                f.write(f"file '{video.name}'\n")

        result = subprocess.run(
            [
                "ffmpeg",
                "-y",  # Overwrite
                "-f",
                "concat",
                "-safe",
                "0",
                "-i",
                "concat_list.txt",  # Use relative name since cwd is set
                "-c",
                "copy",
                output_path.name,  # Use relative name
            ],
            capture_output=True,
            text=True,
            cwd=str(output_path.parent),  # Ensure string path
        )

        if result.returncode != 0:
            logger.error(f"ffmpeg error: {result.stderr}")
            return False

        return True

    except FileNotFoundError:
        logger.error("ffmpeg not found. Install with: brew install ffmpeg")
        return False
    finally:
        # Always remove the temp list. The previous version unlinked only on
        # the no-exception path, leaking concat_list.txt when ffmpeg was
        # missing.
        concat_file.unlink(missing_ok=True)
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
def main():
    """CLI entry point: pair up images in a folder and render transition clips.

    Sorts matching images alphabetically, generates one clip per consecutive
    pair in parallel, then concatenates the successful clips with ffmpeg.
    Exits 1 when the folder is missing or matches no images; exits 0 early on
    --dry-run after listing the planned outputs.
    """
    parser = argparse.ArgumentParser(
        description="Generate video clips from consecutive image pairs"
    )
    parser.add_argument(
        "folder",
        type=Path,
        help="Folder containing images",
    )
    parser.add_argument(
        "--pattern",
        default="*.png",
        help="Glob pattern for images (default: *.png)",
    )
    parser.add_argument(
        "--prompt",
        default="",
        help="Prompt for all video generations",
    )
    parser.add_argument(
        "--fps",
        type=int,
        default=16,
        help="Frames per second (default: 16)",
    )
    parser.add_argument(
        "--frames",
        type=int,
        default=81,
        help="Number of frames (default: 81)",
    )
    parser.add_argument(
        "--resolution",
        default="480p",
        choices=["480p", "720p"],
        help="Output resolution (default: 480p)",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Show what would be generated without running",
    )
    args = parser.parse_args()

    folder = args.folder
    if not folder.exists():
        logger.error(f"❌ Folder not found: {folder}")
        sys.exit(1)

    # Find images (alphabetically sorted) — ordering defines the clip sequence
    images = sorted(folder.glob(args.pattern))
    if not images:
        logger.error(f"❌ No images matching '{args.pattern}' in {folder}")
        sys.exit(1)

    # Filter out non-frame images (the character base image is not a frame)
    images = [img for img in images if img.name not in ("character.png",)]

    logger.info(f"📁 Found {len(images)} images in {folder} (alphabetical order)")
    for img in images:
        logger.info(f" {img.name}")

    # Load metadata if available — supplies per-pair motion prompts when no
    # --prompt override is given
    metadata_path = folder / "animated_character_story.json"
    metadata = None
    if metadata_path.exists():
        metadata = json.loads(metadata_path.read_text())
        logger.info(f"📝 Loaded metadata from {metadata_path.name}")

    # Create output folder (skipped on dry-run so nothing is touched on disk)
    videos_folder = folder / "videos"
    if not args.dry_run:
        videos_folder.mkdir(exist_ok=True)

    # Generate videos for consecutive pairs (1→2, 2→3, ...)
    pairs = list(zip(images[:-1], images[1:]))
    logger.info(f"\n🎬 Generating {len(pairs)} video clips (parallel):")

    # Build list of jobs up front so dry-run and real runs share the plan
    jobs = []
    for i, (img1, img2) in enumerate(pairs, 1):
        output_name = f"clip_{i:02d}_{img1.stem}_to_{img2.stem}.mp4"
        output_path = videos_folder / output_name
        prompt = args.prompt or get_prompt_for_pair(metadata, img1.name, img2.name)

        logger.info(f" [{i}] {img1.name} → {img2.name}")
        if prompt:
            logger.info(f" Prompt: {prompt[:50]}...")

        jobs.append(
            {
                "index": i,
                "img1": img1,
                "img2": img2,
                "output_path": output_path,
                "prompt": prompt,
            }
        )

    if args.dry_run:
        for job in jobs:
            logger.info(f" Would save: {job['output_path']}")
        sys.exit(0)

    # Parallel generation
    generated_clips = []

    def run_job(job):
        # Worker: returns (index, path, success) so results can be re-ordered
        # after out-of-order completion.
        success = generate_video_clip(
            first_image=job["img1"],
            last_image=job["img2"],
            output_path=job["output_path"],
            prompt=job["prompt"],
            fps=args.fps,
            num_frames=args.frames,
            resolution=args.resolution,
        )
        return job["index"], job["output_path"], success

    with ThreadPoolExecutor(max_workers=4) as executor:
        futures = {executor.submit(run_job, job): job for job in jobs}

        for future in as_completed(futures):
            idx, output_path, success = future.result()
            if success:
                generated_clips.append((idx, output_path))

    # Sort by index and collect paths — as_completed yields in finish order,
    # not submission order
    generated_clips.sort(key=lambda x: x[0])
    clip_paths = [path for _, path in generated_clips]

    logger.info(f"\n✅ Generated {len(clip_paths)}/{len(pairs)} video clips")

    # Concatenate into final video (only worthwhile with 2+ clips)
    if len(clip_paths) > 1:
        final_output = videos_folder / "final_combined.mp4"
        logger.info(f"\n🎞️ Concatenating clips into {final_output.name}...")

        if concatenate_videos(clip_paths, final_output):
            logger.info(f"✅ Final video: {final_output}")
        else:
            logger.warning("⚠️ Concatenation failed, individual clips available")

    logger.info(f"📂 Output: {videos_folder}")
|
|
332
|
+
|
|
333
|
+
|
|
334
|
+
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,248 @@
|
|
|
1
|
+
"""Animated character storyboard node.
|
|
2
|
+
|
|
3
|
+
Workflow:
|
|
4
|
+
1. If no reference_image provided, generate character.png from character_prompt
|
|
5
|
+
2. For each panel:
|
|
6
|
+
- Original: img2img from character/reference (magic=0.25 for consistency)
|
|
7
|
+
- First frame: img2img from original
|
|
8
|
+
- Last frame: img2img from original
|
|
9
|
+
|
|
10
|
+
This ensures character consistency across all panels.
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from __future__ import annotations
|
|
14
|
+
|
|
15
|
+
import json
|
|
16
|
+
import logging
|
|
17
|
+
from datetime import datetime
|
|
18
|
+
from pathlib import Path
|
|
19
|
+
from typing import Any
|
|
20
|
+
|
|
21
|
+
from .replicate_tool import edit_image, generate_image
|
|
22
|
+
|
|
23
|
+
# Module-level logger for this node.
logger = logging.getLogger(__name__)

# Loose alias: the graph state is treated as a plain string-keyed dict here.
GraphState = dict[str, Any]
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def generate_animated_character_images(state: GraphState) -> dict:
    """Generate animated character-consistent storyboard images.

    Workflow:
    1. If no reference_image: generate character.png from character_prompt
    2. For each panel:
       - Original: img2img from character/reference
       - First frame: img2img from original
       - Last frame: img2img from original

    Every early-exit returns a state update containing an 'error' key and an
    empty 'images' list rather than raising.

    Args:
        state: Graph state with 'story', 'animated_panels', and optional 'reference_image'

    Returns:
        State update with 'images', 'character_image', 'output_dir'
    """
    story = state.get("story")
    animated_panels = state.get("animated_panels", [])

    # Guard: a story payload is required before anything else can happen.
    if not story:
        logger.error("No story in state")
        return {
            "current_step": "generate_animated_character_images",
            "images": [],
            "error": "No story in state",
        }

    # Handle Pydantic model or dict — the graph may deliver either form.
    if hasattr(story, "model_dump"):
        story_dict = story.model_dump()
    elif isinstance(story, dict):
        story_dict = story
    else:
        # Unknown type: empty dict falls through to the missing-prompt guard.
        story_dict = {}

    character_prompt = story_dict.get("character_prompt", "")
    if not character_prompt:
        logger.error("No character_prompt in story")
        return {
            "current_step": "generate_animated_character_images",
            "images": [],
            "error": "No character_prompt provided",
        }

    if not animated_panels:
        logger.error("No animated_panels in state")
        return {
            "current_step": "generate_animated_character_images",
            "images": [],
            "error": "No animated panels to generate",
        }

    # Sort by _map_index if present so panels render in story order
    # (fan-out map branches may have completed out of order).
    if animated_panels and isinstance(animated_panels[0], dict):
        animated_panels = sorted(
            animated_panels,
            key=lambda x: x.get("_map_index", 0) if isinstance(x, dict) else 0,
        )

    # Create output directory keyed by thread id (timestamp fallback).
    thread_id = state.get("thread_id", datetime.now().strftime("%Y%m%d_%H%M%S"))
    output_dir = Path("outputs/storyboard") / thread_id / "animated"
    output_dir.mkdir(parents=True, exist_ok=True)

    model_name = state.get("model", "z-image")
    reference_image = state.get("reference_image")
    reference_path = Path(reference_image) if reference_image else None

    logger.info(f"🎬 Generating animated character storyboard in {output_dir}")
    logger.info(f"🖼️ Using model: {model_name}")

    # If no reference provided, generate a character image first — it anchors
    # character consistency for every panel's img2img pass.
    if not reference_path:
        character_path = output_dir / "character.png"
        logger.info("🎭 Generating character base image...")
        char_result = generate_image(
            prompt=character_prompt,
            output_path=character_path,
            model_name=model_name,
        )
        if char_result.success:
            reference_path = character_path
            logger.info(f"✓ Character image created: {character_path}")
        else:
            # Without a reference the whole workflow is impossible — abort.
            logger.error(f"Character generation failed: {char_result.error}")
            return {
                "current_step": "generate_animated_character_images",
                "images": [],
                "error": f"Character generation failed: {char_result.error}",
            }
    else:
        logger.info(f"🎭 Using reference image: {reference_path}")

    # Generate frames for each panel
    # Each panel: img2img from character/reference, then img2img for first/last
    total_images = len(animated_panels) * 3
    logger.info(
        f"🎞️ Generating {total_images} frames ({len(animated_panels)} panels × 3)"
    )

    all_results: list[dict] = []

    for panel_idx, panel in enumerate(animated_panels, 1):
        # Panels, like the story, may be Pydantic models or plain dicts.
        if hasattr(panel, "model_dump"):
            panel_dict = panel.model_dump()
        elif isinstance(panel, dict):
            panel_dict = panel
        else:
            logger.warning(f"Panel {panel_idx} has unexpected type: {type(panel)}")
            continue

        panel_result = {"panel": panel_idx, "frames": {}}

        # Step 1: Generate ORIGINAL image via img2img from character/reference
        original_prompt = panel_dict.get("original", "")
        if not original_prompt:
            logger.warning(f"Panel {panel_idx} missing original prompt")
            continue

        # Combine character + scene for consistency
        full_original_prompt = f"{character_prompt}, {original_prompt}"
        original_path = output_dir / f"panel_{panel_idx}_original.png"
        logger.info(f"📸 Panel {panel_idx} original: {original_prompt[:50]}...")

        # Use img2img from reference for character consistency
        # magic=0.25 preserves more of the reference (lower = more original)
        original_result = edit_image(
            input_image=reference_path,
            prompt=full_original_prompt,
            output_path=original_path,
            aspect_ratio="16:9",
            magic=0.25,
        )

        if not original_result.success:
            # Skip the whole panel: first/last frames derive from the original,
            # so the panel produces no entry in all_results at all.
            logger.error(f"Panel {panel_idx} original failed: {original_result.error}")
            continue

        panel_result["frames"]["original"] = str(original_path)
        logger.info(f"✓ Panel {panel_idx} original created")

        # Step 2: Generate first_frame via img2img from original
        first_prompt = panel_dict.get("first_frame", "")
        if first_prompt:
            first_path = output_dir / f"panel_{panel_idx}_first_frame.png"
            logger.info(f"📸 Panel {panel_idx} first_frame: {first_prompt[:50]}...")

            first_result = edit_image(
                input_image=original_path,
                prompt=first_prompt,
                output_path=first_path,
                aspect_ratio="16:9",
            )

            if first_result.success and first_result.path:
                panel_result["frames"]["first_frame"] = first_result.path
            else:
                # Record the failure explicitly so metadata shows the gap.
                logger.error(
                    f"Panel {panel_idx} first_frame failed: {first_result.error}"
                )
                panel_result["frames"]["first_frame"] = None

        # Step 3: Generate last_frame via img2img from original
        last_prompt = panel_dict.get("last_frame", "")
        if last_prompt:
            last_path = output_dir / f"panel_{panel_idx}_last_frame.png"
            logger.info(f"📸 Panel {panel_idx} last_frame: {last_prompt[:50]}...")

            last_result = edit_image(
                input_image=original_path,
                prompt=last_prompt,
                output_path=last_path,
                aspect_ratio="16:9",
            )

            if last_result.success and last_result.path:
                panel_result["frames"]["last_frame"] = last_result.path
            else:
                logger.error(
                    f"Panel {panel_idx} last_frame failed: {last_result.error}"
                )
                panel_result["frames"]["last_frame"] = None

        all_results.append(panel_result)

    # Save metadata alongside the frames; downstream video generation
    # (generate_videos.py) reads this file for per-pair prompts.
    frame_keys = ["first_frame", "original", "last_frame"]
    metadata_path = output_dir / "animated_character_story.json"
    metadata = {
        "concept": state.get("concept", ""),
        "title": story_dict.get("title", ""),
        "narrative": story_dict.get("narrative", ""),
        "character_prompt": character_prompt,
        "reference_image": str(reference_path) if reference_path else None,
        "panels": [
            {
                "index": r["panel"],
                "frames": r["frames"],
                # Panel numbers are 1-based; index back into the sorted list.
                # NOTE(review): Pydantic-model panels yield "" for every
                # prompt here (only dicts are read) — confirm if intended.
                "prompts": {
                    k: animated_panels[r["panel"] - 1].get(k, "")
                    if isinstance(animated_panels[r["panel"] - 1], dict)
                    else ""
                    for k in frame_keys
                },
            }
            for r in all_results
        ],
        "generated_at": datetime.now().isoformat(),
    }
    metadata_path.write_text(json.dumps(metadata, indent=2))
    logger.info(f"📝 Metadata saved: {metadata_path}")

    # Count only frames that actually produced a path (None = failed).
    success_count = sum(1 for r in all_results for path in r["frames"].values() if path)
    logger.info(f"✅ Generated {success_count}/{total_images} images")

    return {
        "current_step": "generate_animated_character_images",
        "character_image": str(reference_path),
        "images": all_results,
        "output_dir": str(output_dir),
    }
|