mcli-framework 7.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mcli-framework might be problematic.

Files changed (186)
  1. mcli/app/chat_cmd.py +42 -0
  2. mcli/app/commands_cmd.py +226 -0
  3. mcli/app/completion_cmd.py +216 -0
  4. mcli/app/completion_helpers.py +288 -0
  5. mcli/app/cron_test_cmd.py +697 -0
  6. mcli/app/logs_cmd.py +419 -0
  7. mcli/app/main.py +492 -0
  8. mcli/app/model/model.py +1060 -0
  9. mcli/app/model_cmd.py +227 -0
  10. mcli/app/redis_cmd.py +269 -0
  11. mcli/app/video/video.py +1114 -0
  12. mcli/app/visual_cmd.py +303 -0
  13. mcli/chat/chat.py +2409 -0
  14. mcli/chat/command_rag.py +514 -0
  15. mcli/chat/enhanced_chat.py +652 -0
  16. mcli/chat/system_controller.py +1010 -0
  17. mcli/chat/system_integration.py +1016 -0
  18. mcli/cli.py +25 -0
  19. mcli/config.toml +20 -0
  20. mcli/lib/api/api.py +586 -0
  21. mcli/lib/api/daemon_client.py +203 -0
  22. mcli/lib/api/daemon_client_local.py +44 -0
  23. mcli/lib/api/daemon_decorator.py +217 -0
  24. mcli/lib/api/mcli_decorators.py +1032 -0
  25. mcli/lib/auth/auth.py +85 -0
  26. mcli/lib/auth/aws_manager.py +85 -0
  27. mcli/lib/auth/azure_manager.py +91 -0
  28. mcli/lib/auth/credential_manager.py +192 -0
  29. mcli/lib/auth/gcp_manager.py +93 -0
  30. mcli/lib/auth/key_manager.py +117 -0
  31. mcli/lib/auth/mcli_manager.py +93 -0
  32. mcli/lib/auth/token_manager.py +75 -0
  33. mcli/lib/auth/token_util.py +1011 -0
  34. mcli/lib/config/config.py +47 -0
  35. mcli/lib/discovery/__init__.py +1 -0
  36. mcli/lib/discovery/command_discovery.py +274 -0
  37. mcli/lib/erd/erd.py +1345 -0
  38. mcli/lib/erd/generate_graph.py +453 -0
  39. mcli/lib/files/files.py +76 -0
  40. mcli/lib/fs/fs.py +109 -0
  41. mcli/lib/lib.py +29 -0
  42. mcli/lib/logger/logger.py +611 -0
  43. mcli/lib/performance/optimizer.py +409 -0
  44. mcli/lib/performance/rust_bridge.py +502 -0
  45. mcli/lib/performance/uvloop_config.py +154 -0
  46. mcli/lib/pickles/pickles.py +50 -0
  47. mcli/lib/search/cached_vectorizer.py +479 -0
  48. mcli/lib/services/data_pipeline.py +460 -0
  49. mcli/lib/services/lsh_client.py +441 -0
  50. mcli/lib/services/redis_service.py +387 -0
  51. mcli/lib/shell/shell.py +137 -0
  52. mcli/lib/toml/toml.py +33 -0
  53. mcli/lib/ui/styling.py +47 -0
  54. mcli/lib/ui/visual_effects.py +634 -0
  55. mcli/lib/watcher/watcher.py +185 -0
  56. mcli/ml/api/app.py +215 -0
  57. mcli/ml/api/middleware.py +224 -0
  58. mcli/ml/api/routers/admin_router.py +12 -0
  59. mcli/ml/api/routers/auth_router.py +244 -0
  60. mcli/ml/api/routers/backtest_router.py +12 -0
  61. mcli/ml/api/routers/data_router.py +12 -0
  62. mcli/ml/api/routers/model_router.py +302 -0
  63. mcli/ml/api/routers/monitoring_router.py +12 -0
  64. mcli/ml/api/routers/portfolio_router.py +12 -0
  65. mcli/ml/api/routers/prediction_router.py +267 -0
  66. mcli/ml/api/routers/trade_router.py +12 -0
  67. mcli/ml/api/routers/websocket_router.py +76 -0
  68. mcli/ml/api/schemas.py +64 -0
  69. mcli/ml/auth/auth_manager.py +425 -0
  70. mcli/ml/auth/models.py +154 -0
  71. mcli/ml/auth/permissions.py +302 -0
  72. mcli/ml/backtesting/backtest_engine.py +502 -0
  73. mcli/ml/backtesting/performance_metrics.py +393 -0
  74. mcli/ml/cache.py +400 -0
  75. mcli/ml/cli/main.py +398 -0
  76. mcli/ml/config/settings.py +394 -0
  77. mcli/ml/configs/dvc_config.py +230 -0
  78. mcli/ml/configs/mlflow_config.py +131 -0
  79. mcli/ml/configs/mlops_manager.py +293 -0
  80. mcli/ml/dashboard/app.py +532 -0
  81. mcli/ml/dashboard/app_integrated.py +738 -0
  82. mcli/ml/dashboard/app_supabase.py +560 -0
  83. mcli/ml/dashboard/app_training.py +615 -0
  84. mcli/ml/dashboard/cli.py +51 -0
  85. mcli/ml/data_ingestion/api_connectors.py +501 -0
  86. mcli/ml/data_ingestion/data_pipeline.py +567 -0
  87. mcli/ml/data_ingestion/stream_processor.py +512 -0
  88. mcli/ml/database/migrations/env.py +94 -0
  89. mcli/ml/database/models.py +667 -0
  90. mcli/ml/database/session.py +200 -0
  91. mcli/ml/experimentation/ab_testing.py +845 -0
  92. mcli/ml/features/ensemble_features.py +607 -0
  93. mcli/ml/features/political_features.py +676 -0
  94. mcli/ml/features/recommendation_engine.py +809 -0
  95. mcli/ml/features/stock_features.py +573 -0
  96. mcli/ml/features/test_feature_engineering.py +346 -0
  97. mcli/ml/logging.py +85 -0
  98. mcli/ml/mlops/data_versioning.py +518 -0
  99. mcli/ml/mlops/experiment_tracker.py +377 -0
  100. mcli/ml/mlops/model_serving.py +481 -0
  101. mcli/ml/mlops/pipeline_orchestrator.py +614 -0
  102. mcli/ml/models/base_models.py +324 -0
  103. mcli/ml/models/ensemble_models.py +675 -0
  104. mcli/ml/models/recommendation_models.py +474 -0
  105. mcli/ml/models/test_models.py +487 -0
  106. mcli/ml/monitoring/drift_detection.py +676 -0
  107. mcli/ml/monitoring/metrics.py +45 -0
  108. mcli/ml/optimization/portfolio_optimizer.py +834 -0
  109. mcli/ml/preprocessing/data_cleaners.py +451 -0
  110. mcli/ml/preprocessing/feature_extractors.py +491 -0
  111. mcli/ml/preprocessing/ml_pipeline.py +382 -0
  112. mcli/ml/preprocessing/politician_trading_preprocessor.py +569 -0
  113. mcli/ml/preprocessing/test_preprocessing.py +294 -0
  114. mcli/ml/scripts/populate_sample_data.py +200 -0
  115. mcli/ml/tasks.py +400 -0
  116. mcli/ml/tests/test_integration.py +429 -0
  117. mcli/ml/tests/test_training_dashboard.py +387 -0
  118. mcli/public/oi/oi.py +15 -0
  119. mcli/public/public.py +4 -0
  120. mcli/self/self_cmd.py +1246 -0
  121. mcli/workflow/daemon/api_daemon.py +800 -0
  122. mcli/workflow/daemon/async_command_database.py +681 -0
  123. mcli/workflow/daemon/async_process_manager.py +591 -0
  124. mcli/workflow/daemon/client.py +530 -0
  125. mcli/workflow/daemon/commands.py +1196 -0
  126. mcli/workflow/daemon/daemon.py +905 -0
  127. mcli/workflow/daemon/daemon_api.py +59 -0
  128. mcli/workflow/daemon/enhanced_daemon.py +571 -0
  129. mcli/workflow/daemon/process_cli.py +244 -0
  130. mcli/workflow/daemon/process_manager.py +439 -0
  131. mcli/workflow/daemon/test_daemon.py +275 -0
  132. mcli/workflow/dashboard/dashboard_cmd.py +113 -0
  133. mcli/workflow/docker/docker.py +0 -0
  134. mcli/workflow/file/file.py +100 -0
  135. mcli/workflow/gcloud/config.toml +21 -0
  136. mcli/workflow/gcloud/gcloud.py +58 -0
  137. mcli/workflow/git_commit/ai_service.py +328 -0
  138. mcli/workflow/git_commit/commands.py +430 -0
  139. mcli/workflow/lsh_integration.py +355 -0
  140. mcli/workflow/model_service/client.py +594 -0
  141. mcli/workflow/model_service/download_and_run_efficient_models.py +288 -0
  142. mcli/workflow/model_service/lightweight_embedder.py +397 -0
  143. mcli/workflow/model_service/lightweight_model_server.py +714 -0
  144. mcli/workflow/model_service/lightweight_test.py +241 -0
  145. mcli/workflow/model_service/model_service.py +1955 -0
  146. mcli/workflow/model_service/ollama_efficient_runner.py +425 -0
  147. mcli/workflow/model_service/pdf_processor.py +386 -0
  148. mcli/workflow/model_service/test_efficient_runner.py +234 -0
  149. mcli/workflow/model_service/test_example.py +315 -0
  150. mcli/workflow/model_service/test_integration.py +131 -0
  151. mcli/workflow/model_service/test_new_features.py +149 -0
  152. mcli/workflow/openai/openai.py +99 -0
  153. mcli/workflow/politician_trading/commands.py +1790 -0
  154. mcli/workflow/politician_trading/config.py +134 -0
  155. mcli/workflow/politician_trading/connectivity.py +490 -0
  156. mcli/workflow/politician_trading/data_sources.py +395 -0
  157. mcli/workflow/politician_trading/database.py +410 -0
  158. mcli/workflow/politician_trading/demo.py +248 -0
  159. mcli/workflow/politician_trading/models.py +165 -0
  160. mcli/workflow/politician_trading/monitoring.py +413 -0
  161. mcli/workflow/politician_trading/scrapers.py +966 -0
  162. mcli/workflow/politician_trading/scrapers_california.py +412 -0
  163. mcli/workflow/politician_trading/scrapers_eu.py +377 -0
  164. mcli/workflow/politician_trading/scrapers_uk.py +350 -0
  165. mcli/workflow/politician_trading/scrapers_us_states.py +438 -0
  166. mcli/workflow/politician_trading/supabase_functions.py +354 -0
  167. mcli/workflow/politician_trading/workflow.py +852 -0
  168. mcli/workflow/registry/registry.py +180 -0
  169. mcli/workflow/repo/repo.py +223 -0
  170. mcli/workflow/scheduler/commands.py +493 -0
  171. mcli/workflow/scheduler/cron_parser.py +238 -0
  172. mcli/workflow/scheduler/job.py +182 -0
  173. mcli/workflow/scheduler/monitor.py +139 -0
  174. mcli/workflow/scheduler/persistence.py +324 -0
  175. mcli/workflow/scheduler/scheduler.py +679 -0
  176. mcli/workflow/sync/sync_cmd.py +437 -0
  177. mcli/workflow/sync/test_cmd.py +314 -0
  178. mcli/workflow/videos/videos.py +242 -0
  179. mcli/workflow/wakatime/wakatime.py +11 -0
  180. mcli/workflow/workflow.py +37 -0
  181. mcli_framework-7.0.0.dist-info/METADATA +479 -0
  182. mcli_framework-7.0.0.dist-info/RECORD +186 -0
  183. mcli_framework-7.0.0.dist-info/WHEEL +5 -0
  184. mcli_framework-7.0.0.dist-info/entry_points.txt +7 -0
  185. mcli_framework-7.0.0.dist-info/licenses/LICENSE +21 -0
  186. mcli_framework-7.0.0.dist-info/top_level.txt +1 -0
mcli/app/model/model.py
@@ -0,0 +1,1060 @@
+ import base64
+ import io
+ import json
+ import os
+ import queue
+ import shutil
+ import sys
+ import tempfile
+ import threading
+ import time
+ import uuid
+ from pathlib import Path
+ from typing import Any, Dict, List, Optional, Tuple
+
+ import click
+ import cv2
+ import numpy as np
+ import requests
+ from PIL import Image
+
+ # Configuration paths based on the provided directory structure
+ CONFIG = {
+     "hunyuan_video_model": "",
+     "hunyuan_vae": "",
+     "clip_vision_model": "",
+     "text_encoder": "",
+     "controlnet_model": "",
+     "lora_model": "",  # Example LoRA
+     "temp_dir": "./temp_frames",
+     "output_dir": "./output",
+     "comfyui_api": "",
+ }
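+
+ # The model paths above ship empty and must be filled in before use. A populated
+ # CONFIG might look like the following; every path and the URL are hypothetical
+ # examples for a local ComfyUI install, not values shipped with the package:
+ #
+ # CONFIG = {
+ #     "hunyuan_video_model": "models/diffusion_models/hunyuan_video_720p.safetensors",
+ #     "hunyuan_vae": "models/vae/hunyuan_video_vae.safetensors",
+ #     "clip_vision_model": "models/clip_vision/clip_vision_h.safetensors",
+ #     "text_encoder": "models/text_encoders/llava_llama3_fp8.safetensors",
+ #     "controlnet_model": "models/controlnet/controlnet_depth.safetensors",
+ #     "lora_model": "models/loras/example_style.safetensors",
+ #     "temp_dir": "./temp_frames",
+ #     "output_dir": "./output",
+ #     "comfyui_api": "http://127.0.0.1:8188",
+ # }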
+
+
+ class VideoProcessor:
+     """Handles video processing operations including frame extraction and reconstruction."""
+
+     def __init__(self, temp_dir: str = CONFIG["temp_dir"]):
+         self.temp_dir = temp_dir
+         self.video_info: Dict[str, Any] = {}
+         os.makedirs(temp_dir, exist_ok=True)
+         os.makedirs(CONFIG["output_dir"], exist_ok=True)
+
+     def extract_frames(self, video_path: str, fps: int = 8) -> List[str]:
+         """
+         Extract frames from video at specified FPS.
+
+         Args:
+             video_path: Path to input video
+             fps: Frames per second to extract
+
+         Returns:
+             List of paths to extracted frames
+         """
+         click.echo(click.style(f"Extracting frames from {video_path} at {fps} FPS...", fg="green"))
+
+         # Clean temp directory (skip subdirectories such as "consistent/" left by earlier runs)
+         for file in os.listdir(self.temp_dir):
+             file_path = os.path.join(self.temp_dir, file)
+             if os.path.isfile(file_path):
+                 os.remove(file_path)
+
+         # Extract frames
+         video = cv2.VideoCapture(video_path)
+         video_fps = video.get(cv2.CAP_PROP_FPS)
+         # Guard against a zero interval when the requested FPS exceeds the source FPS
+         frame_interval = max(1, int(video_fps / fps))
+         frame_paths = []
+
+         frame_count = 0
+         frame_saved = 0
+
+         with click.progressbar(
+             length=int(video.get(cv2.CAP_PROP_FRAME_COUNT)), label="Extracting frames"
+         ) as bar:
+             while True:
+                 success, frame = video.read()
+                 if not success:
+                     break
+
+                 if frame_count % frame_interval == 0:
+                     frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+                     frame_path = os.path.join(self.temp_dir, f"frame_{frame_saved:05d}.png")
+                     Image.fromarray(frame_rgb).save(frame_path)
+                     frame_paths.append(frame_path)
+                     frame_saved += 1
+
+                 frame_count += 1
+                 bar.update(1)
+
+         # Save video info for reconstruction; query the properties before releasing the capture
+         self.video_info = {
+             "original_fps": video_fps,
+             "width": int(video.get(cv2.CAP_PROP_FRAME_WIDTH)),
+             "height": int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)),
+             "total_frames": frame_count,
+         }
+         video.release()
+         click.echo(f"Extracted {len(frame_paths)} frames.")
+
+         return frame_paths
+
+     def extract_motion_vectors(self, video_path: str) -> Dict[str, Any]:
+         """
+         Extract motion vectors from video for temporal consistency.
+         This is a simplified placeholder for actual motion vector extraction.
+
+         Args:
+             video_path: Path to input video
+
+         Returns:
+             Dictionary with motion vector data
+         """
+         # Placeholder for motion vector extraction
+         # In a complete implementation, this would use optical flow or
+         # dedicated motion vector extraction techniques
+         click.echo(click.style("Extracting motion vectors...", fg="blue"))
+
+         # Simple optical flow calculation between consecutive frames
+         video = cv2.VideoCapture(video_path)
+         ret, prev_frame = video.read()
+         if not ret:
+             video.release()
+             return {}
+         prev_gray = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
+
+         motion_data = {}
+         frame_idx = 0
+
+         with click.progressbar(
+             length=int(video.get(cv2.CAP_PROP_FRAME_COUNT)) - 1, label="Analyzing motion"
+         ) as bar:
+             while True:
+                 ret, frame = video.read()
+                 if not ret:
+                     break
+
+                 curr_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+                 flow = cv2.calcOpticalFlowFarneback(
+                     prev_gray,
+                     curr_gray,
+                     None,
+                     pyr_scale=0.5,
+                     levels=3,
+                     winsize=15,
+                     iterations=3,
+                     poly_n=5,
+                     poly_sigma=1.2,
+                     flags=0,
+                 )
+
+                 # Store compressed flow data
+                 motion_data[f"frame_{frame_idx:05d}"] = {
+                     "mean_x": float(np.mean(flow[..., 0])),
+                     "mean_y": float(np.mean(flow[..., 1])),
+                     "std_x": float(np.std(flow[..., 0])),
+                     "std_y": float(np.std(flow[..., 1])),
+                 }
+
+                 prev_gray = curr_gray
+                 frame_idx += 1
+                 bar.update(1)
+
+         video.release()
+         click.echo("Motion analysis complete.")
+         return motion_data
+
+     def frames_to_video(
+         self, frame_paths: List[str], output_path: str, fps: Optional[float] = None
+     ) -> str:
+         """
+         Convert frames back to video.
+
+         Args:
+             frame_paths: List of paths to frames
+             output_path: Path for output video
+             fps: Frames per second (defaults to original video FPS)
+
+         Returns:
+             Path to output video
+         """
+         if not frame_paths:
+             raise ValueError("No frames provided")
+
+         if fps is None:
+             fps = self.video_info.get("original_fps", 30)
+
+         click.echo(
+             click.style(
+                 f"Converting {len(frame_paths)} frames to video at {fps} FPS...", fg="green"
+             )
+         )
+
+         # Get dimensions from first frame
+         first_frame = cv2.imread(frame_paths[0])
+         h, w, _ = first_frame.shape
+
+         # Initialize video writer
+         fourcc = cv2.VideoWriter_fourcc(*"mp4v")
+         video_writer = cv2.VideoWriter(output_path, fourcc, fps, (w, h))
+
+         # Add frames to video
+         with click.progressbar(frame_paths, label="Creating video") as bar:
+             for frame_path in bar:
+                 frame = cv2.imread(frame_path)
+                 video_writer.write(frame)
+
+         video_writer.release()
+         click.echo(click.style(f"Video saved to {output_path}", fg="bright_green"))
+
+         return output_path
+
+     def apply_temporal_consistency(
+         self, processed_frames: List[str], motion_data: Dict[str, Any]
+     ) -> List[str]:
+         """
+         Apply temporal consistency to processed frames using motion data.
+         This is a simplified implementation.
+
+         Args:
+             processed_frames: List of paths to processed frames
+             motion_data: Motion vector data from extract_motion_vectors
+
+         Returns:
+             List of paths to temporally consistent frames
+         """
+         click.echo(click.style("Applying temporal consistency...", fg="blue"))
+
+         if len(processed_frames) < 2:
+             return processed_frames
+
+         # Create temp directory for consistent frames
+         consistent_dir = os.path.join(self.temp_dir, "consistent")
+         os.makedirs(consistent_dir, exist_ok=True)
+
+         # Simple temporal consistency with weighted blending
+         consistent_frame_paths = []
+         prev_frame = None
+
+         with click.progressbar(
+             enumerate(processed_frames), length=len(processed_frames), label="Reducing flicker"
+         ) as bar:
+             for i, frame_path in bar:
+                 frame = np.array(Image.open(frame_path))
+
+                 if prev_frame is not None:
+                     # Simple blending with motion-aware weight
+                     if f"frame_{i-1:05d}" in motion_data:
+                         motion_info = motion_data[f"frame_{i-1:05d}"]
+                         # Calculate blending weight based on motion magnitude
+                         motion_magnitude = np.sqrt(
+                             motion_info["mean_x"] ** 2 + motion_info["mean_y"] ** 2
+                         )
+                         # Less blending when motion is high, more when motion is low:
+                         # magnitude 0 gives the 0.3 cap, magnitude 1.0 still gives 0.3,
+                         # and magnitude 3.0 or more hits the 0.1 floor.
+                         blend_weight = max(0.1, min(0.3, 0.4 - motion_magnitude * 0.1))
+                     else:
+                         blend_weight = 0.2
+
+                     # Blend frames
+                     blended_frame = cv2.addWeighted(
+                         prev_frame, blend_weight, frame, 1.0 - blend_weight, 0
+                     )
+                     frame = blended_frame
+
+                 # Save consistent frame
+                 out_path = os.path.join(consistent_dir, f"consistent_{i:05d}.png")
+                 Image.fromarray(frame.astype(np.uint8)).save(out_path)
+                 consistent_frame_paths.append(out_path)
+
+                 prev_frame = frame
+
+         return consistent_frame_paths
+
+
+ class ComfyUIClient:
+     """Client for interacting with ComfyUI API for video generation."""
+
+     def __init__(self, api_url: str = CONFIG["comfyui_api"]):
+         self.api_url = api_url
+         self.client_id = str(uuid.uuid4())
+         self.session = requests.Session()
+
+     def queue_prompt(self, prompt: Dict[str, Any]) -> str:
+         """
+         Queue a prompt in ComfyUI.
+
+         Args:
+             prompt: ComfyUI workflow prompt
+
+         Returns:
+             Prompt ID
+         """
+         p = {"prompt": prompt, "client_id": self.client_id}
+         response = self.session.post(f"{self.api_url}/prompt", json=p)
+         return response.json()["prompt_id"]
+
+     def get_image(self, filename: str, subfolder: str, folder_type: str) -> Image.Image:
+         """
+         Get an image from ComfyUI.
+
+         Args:
+             filename: Image filename
+             subfolder: Image subfolder
+             folder_type: Folder type (input or output)
+
+         Returns:
+             PIL Image
+         """
+         data = {"filename": filename, "subfolder": subfolder, "folder_type": folder_type}
+         response = self.session.get(f"{self.api_url}/view", params=data)
+         # Wrap the downloaded bytes; response.raw is not reliably readable once
+         # requests has already consumed the body
+         return Image.open(io.BytesIO(response.content))
+
+     def upload_image(self, image_path: str) -> Tuple[str, str]:
+         """
+         Upload an image to ComfyUI.
+
+         Args:
+             image_path: Path to image
+
+         Returns:
+             Tuple of (filename, subfolder)
+         """
+         # Read the image and convert to base64
+         with open(image_path, "rb") as f:
+             encoded_image = base64.b64encode(f.read()).decode("utf-8")
+
+         # Upload the image
+         filename = os.path.basename(image_path)
+         data = {
+             "image": encoded_image,
+             "filename": filename,
+             "subfolder": "v2v_input",  # Custom subfolder for our workflow
+             "type": "input",
+         }
+         self.session.post(f"{self.api_url}/upload/image", json=data)
+         return filename, "v2v_input"
+
+     def wait_for_prompt(self, prompt_id: str) -> Dict[str, Any]:
+         """
+         Wait for prompt to complete.
+
+         Args:
+             prompt_id: Prompt ID
+
+         Returns:
+             Prompt result
+         """
+         # Create a progress bar to indicate processing
+         with click.progressbar(
+             length=100, label=f"Processing frame with ComfyUI (ID: {prompt_id})"
+         ) as bar:
+             progress = 0
+             while progress < 100:
+                 response = self.session.get(f"{self.api_url}/history/{prompt_id}")
+                 if response.status_code == 200:
+                     data = response.json()
+                     if prompt_id in data:
+                         if "outputs" in data[prompt_id]:
+                             # Complete
+                             bar.update(100 - progress)
+                             return data[prompt_id]
+                         # Update progress based on execution state
+                         if "executed" in data[prompt_id]:
+                             executed_nodes = len(data[prompt_id]["executed"])
+                             total_nodes = len(data[prompt_id].get("prompt", {}))
+                             if total_nodes > 0:
+                                 new_progress = min(99, int((executed_nodes / total_nodes) * 100))
+                                 bar.update(new_progress - progress)
+                                 progress = new_progress
+
+                 time.sleep(0.5)
+
+         # If we get here, assume it's done and try to get the final result
+         response = self.session.get(f"{self.api_url}/history/{prompt_id}")
+         if response.status_code == 200:
+             data = response.json()
+             if prompt_id in data:
+                 return data[prompt_id]
+
+         raise ValueError(f"Processing failed for prompt {prompt_id}")
+
+     def process_frame(
+         self,
+         frame_path: str,
+         prompt: str,
+         negative_prompt: str = "",
+         strength: float = 0.75,
+         guidance_scale: float = 7.5,
+         use_controlnet: bool = True,
+         use_lora: bool = True,
+     ) -> str:
+         """
+         Process a frame using ComfyUI.
+
+         Args:
+             frame_path: Path to input frame
+             prompt: Text prompt for generation
+             negative_prompt: Negative text prompt
+             strength: Denoising strength (0-1)
+             guidance_scale: Classifier-free guidance scale
+             use_controlnet: Whether to use ControlNet
+             use_lora: Whether to use LoRA
+
+         Returns:
+             Path to processed frame
+         """
+         # Upload the frame
+         filename, subfolder = self.upload_image(frame_path)
+
+         # Build workflow for frame processing
+         workflow = self.build_frame_processing_workflow(
+             filename=filename,
+             subfolder=subfolder,
+             prompt=prompt,
+             negative_prompt=negative_prompt,
+             strength=strength,
+             guidance_scale=guidance_scale,
+             use_controlnet=use_controlnet,
+             use_lora=use_lora,
+         )
+
+         # Queue the prompt
+         prompt_id = self.queue_prompt(workflow)
+
+         # Wait for completion
+         result = self.wait_for_prompt(prompt_id)
+
+         # Get the output image
+         output_node = None
+         for node_output in result["outputs"].values():
+             if "images" in node_output:
+                 output_node = node_output
+                 break
+
+         if output_node is None:
+             raise ValueError("No output image found in result")
+
+         # Save the output image
+         output_filename = output_node["images"][0]["filename"]
+         output_subfolder = output_node["images"][0]["subfolder"]
+
+         output_path = os.path.join(CONFIG["temp_dir"], f"processed_{os.path.basename(frame_path)}")
+
+         # Download and save
+         img = self.get_image(output_filename, output_subfolder, "output")
+         img.save(output_path)
+
+         return output_path
+
+     def build_frame_processing_workflow(
+         self,
+         filename: str,
+         subfolder: str,
+         prompt: str,
+         negative_prompt: str,
+         strength: float,
+         guidance_scale: float,
+         use_controlnet: bool,
+         use_lora: bool,
+     ) -> Dict[str, Any]:
+         """
+         Build a ComfyUI workflow for frame processing.
+
+         Args:
+             filename: Input frame filename
+             subfolder: Input frame subfolder
+             prompt: Text prompt
+             negative_prompt: Negative text prompt
+             strength: Denoising strength
+             guidance_scale: CFG scale
+             use_controlnet: Whether to use ControlNet
+             use_lora: Whether to use LoRA
+
+         Returns:
+             ComfyUI workflow dictionary
+         """
+         # This is a simplified workflow that would need to be adjusted based on
+         # your exact ComfyUI nodes and workflow requirements
+
+         workflow = {
+             "1": {"inputs": {"image": f"{subfolder}/{filename}"}, "class_type": "LoadImage"},
+             "2": {"inputs": {"text": prompt, "clip": ["5", 0]}, "class_type": "CLIPTextEncode"},
+             "3": {
+                 "inputs": {"text": negative_prompt, "clip": ["5", 0]},
+                 "class_type": "CLIPTextEncode",
+             },
+             "4": {
+                 "inputs": {
+                     "seed": 42,
+                     "steps": 20,
+                     "cfg": guidance_scale,
+                     "sampler_name": "dpmpp_2m",
+                     "scheduler": "karras",
+                     "denoise": strength,
+                     "model": ["5", 0],
+                     "positive": ["2", 0],
+                     "negative": ["3", 0],
+                     "latent_image": ["10", 0],
+                 },
+                 "class_type": "KSampler",
+             },
+             "5": {
+                 "inputs": {"model_name": os.path.basename(CONFIG["hunyuan_video_model"])},
+                 "class_type": "HunyuanVideoModelLoader",
+             },
+             "6": {
+                 "inputs": {"vae_name": os.path.basename(CONFIG["hunyuan_vae"])},
+                 "class_type": "HunyuanVideoVAELoader",
+             },
+             "7": {"inputs": {"samples": ["4", 0], "vae": ["6", 0]}, "class_type": "VAEDecode"},
+             "8": {
+                 "inputs": {"filename_prefix": "v2v_output", "images": ["7", 0]},
+                 "class_type": "SaveImage",
+             },
+             "9": {
+                 "inputs": {"image": ["1", 0], "text_encoder": ["5", 1], "vae": ["6", 0]},
+                 "class_type": "HunyuanImagePreprocessor",
+             },
+             "10": {"inputs": {"pixels": ["9", 0], "vae": ["6", 0]}, "class_type": "VAEEncode"},
+         }
+
+         # Add LoRA if requested
+         if use_lora:
+             workflow["11"] = {
+                 "inputs": {
+                     "model": ["5", 0],
+                     "clip": ["5", 0],
+                     "lora_name": os.path.basename(CONFIG["lora_model"]),
+                     "strength_model": 0.8,
+                     "strength_clip": 0.8,
+                 },
+                 "class_type": "LoraLoader",
+             }
+             # Update model and clip references
+             workflow["4"]["inputs"]["model"] = ["11", 0]
+             workflow["2"]["inputs"]["clip"] = ["11", 1]
+             workflow["3"]["inputs"]["clip"] = ["11", 1]
+
+         # Add ControlNet if requested
+         if use_controlnet:
+             workflow["12"] = {
+                 "inputs": {"control_net_name": os.path.basename(CONFIG["controlnet_model"])},
+                 "class_type": "ControlNetLoader",
+             }
+             workflow["13"] = {
+                 "inputs": {"image": ["1", 0], "control_net": ["12", 0], "strength": 0.6},
+                 "class_type": "ControlNetApply",
+             }
+             # Route the model through ControlNet (after LoRA when both are enabled)
+             if use_lora:
+                 workflow["13"]["inputs"]["model"] = ["11", 0]
+             else:
+                 workflow["13"]["inputs"]["model"] = ["5", 0]
+             workflow["4"]["inputs"]["model"] = ["13", 0]
+
+         return workflow
+
+
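+ # A minimal usage sketch for the client above, assuming a local ComfyUI server
+ # (CONFIG["comfyui_api"] ships empty, so the URL here is a hypothetical example):
+ #
+ #     client = ComfyUIClient(api_url="http://127.0.0.1:8188")
+ #     styled_frame = client.process_frame("temp_frames/frame_00000.png", prompt="watercolor city")
+ #     # styled_frame is a path like temp_frames/processed_frame_00000.png
+
+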
+ class VideoToVideoGenerator:
+     """Main class for video-to-video generation workflow."""
+
+     def __init__(self):
+         self.video_processor = VideoProcessor()
+         self.comfyui_client = ComfyUIClient()
+
+     def generate(
+         self,
+         input_video: str,
+         output_video: str,
+         prompt: str,
+         negative_prompt: str = "",
+         fps: int = 8,
+         strength: float = 0.75,
+         guidance_scale: float = 7.5,
+         use_controlnet: bool = True,
+         use_lora: bool = True,
+         use_temporal_consistency: bool = True,
+         batch_size: int = 1,
+     ) -> str:
+         """
+         Generate a video using the video-to-video workflow.
+
+         Args:
+             input_video: Path to input video
+             output_video: Path to output video
+             prompt: Text prompt for generation
+             negative_prompt: Negative text prompt
+             fps: Frames per second to process
+             strength: Denoising strength (0-1)
+             guidance_scale: Classifier-free guidance scale
+             use_controlnet: Whether to use ControlNet
+             use_lora: Whether to use LoRA
+             use_temporal_consistency: Whether to apply temporal consistency
+             batch_size: Number of frames to process in parallel
+
+         Returns:
+             Path to output video
+         """
+         click.echo(click.style("=" * 80, fg="bright_blue"))
+         click.echo(
+             click.style(
+                 f"Video-to-Video Generation: {input_video} → {output_video}", fg="bright_blue"
+             )
+         )
+         click.echo(click.style("=" * 80, fg="bright_blue"))
+         click.echo(f"Settings:\n- Prompt: {prompt}")
+         if negative_prompt:
+             click.echo(f"- Negative prompt: {negative_prompt}")
+         click.echo(f"- Processing at {fps} FPS")
+         click.echo(f"- Denoising strength: {strength}")
+         click.echo(f"- Guidance scale: {guidance_scale}")
+         click.echo(f"- Using ControlNet: {use_controlnet}")
+         click.echo(f"- Using LoRA: {use_lora}")
+         click.echo(f"- Temporal consistency: {use_temporal_consistency}")
+         click.echo(f"- Batch size: {batch_size}")
+         click.echo(click.style("=" * 80, fg="bright_blue"))
+
+         # Extract frames from input video
+         frame_paths = self.video_processor.extract_frames(input_video, fps=fps)
+
+         # Extract motion data for temporal consistency if needed
+         motion_data = {}
+         if use_temporal_consistency:
+             motion_data = self.video_processor.extract_motion_vectors(input_video)
+
+         # Process frames
+         processed_frame_paths = self.process_frames(
+             frame_paths=frame_paths,
+             prompt=prompt,
+             negative_prompt=negative_prompt,
+             strength=strength,
+             guidance_scale=guidance_scale,
+             use_controlnet=use_controlnet,
+             use_lora=use_lora,
+             batch_size=batch_size,
+         )
+
+         # Apply temporal consistency if requested
+         if use_temporal_consistency:
+             processed_frame_paths = self.video_processor.apply_temporal_consistency(
+                 processed_frame_paths, motion_data
+             )
+
+         # Convert frames to video at the processing FPS
+         output_path = self.video_processor.frames_to_video(
+             frame_paths=processed_frame_paths, output_path=output_video, fps=fps
+         )
+
+         click.echo(click.style("=" * 80, fg="bright_green"))
+         click.echo(click.style(f"Video generation complete: {output_path}", fg="bright_green"))
+         click.echo(click.style("=" * 80, fg="bright_green"))
+         return output_path
+
+     def process_frames(
+         self,
+         frame_paths: List[str],
+         prompt: str,
+         negative_prompt: str = "",
+         strength: float = 0.75,
+         guidance_scale: float = 7.5,
+         use_controlnet: bool = True,
+         use_lora: bool = True,
+         batch_size: int = 1,
+     ) -> List[str]:
+         """
+         Process frames using ComfyUI.
+
+         Args:
+             frame_paths: List of paths to frames
+             prompt: Text prompt for generation
+             negative_prompt: Negative text prompt
+             strength: Denoising strength (0-1)
+             guidance_scale: Classifier-free guidance scale
+             use_controlnet: Whether to use ControlNet
+             use_lora: Whether to use LoRA
+             batch_size: Number of frames to process in parallel
+
+         Returns:
+             List of paths to processed frames
+         """
+         click.echo(click.style(f"Processing {len(frame_paths)} frames...", fg="green"))
+         processed_frame_paths = []
+
+         if batch_size <= 1:
+             # Process frames sequentially
+             with click.progressbar(frame_paths, label="Processing frames") as bar:
+                 for frame_path in bar:
+                     processed_frame_path = self.comfyui_client.process_frame(
+                         frame_path=frame_path,
+                         prompt=prompt,
+                         negative_prompt=negative_prompt,
+                         strength=strength,
+                         guidance_scale=guidance_scale,
+                         use_controlnet=use_controlnet,
+                         use_lora=use_lora,
+                     )
+                     processed_frame_paths.append(processed_frame_path)
+         else:
+             # Process frames in parallel batches
+             for i in range(0, len(frame_paths), batch_size):
+                 batch = frame_paths[i : i + batch_size]
+                 click.echo(
+                     f"Processing batch {i // batch_size + 1}/{(len(frame_paths) - 1) // batch_size + 1} ({len(batch)} frames)"
+                 )
+
+                 # Use threading to process batch in parallel
+                 threads = []
+                 results_queue = queue.Queue()
+
+                 for j, frame_path in enumerate(batch):
+                     thread = threading.Thread(
+                         target=self._process_frame_thread,
+                         args=(
+                             frame_path,
+                             prompt,
+                             negative_prompt,
+                             strength,
+                             guidance_scale,
+                             use_controlnet,
+                             use_lora,
+                             i + j,
+                             results_queue,
+                         ),
+                     )
+                     threads.append(thread)
+                     thread.start()
+
+                 # Wait for all threads to complete
+                 with click.progressbar(
+                     length=len(batch), label=f"Batch {i // batch_size + 1} progress"
+                 ) as bar:
+                     completed = 0
+                     while completed < len(batch):
+                         # Check how many new results have arrived
+                         new_completed = results_queue.qsize() - completed
+                         if new_completed > 0:
+                             bar.update(new_completed)
+                             completed += new_completed
+                         time.sleep(0.5)
+
+                 # Join all threads
+                 for thread in threads:
+                     thread.join()
+
+                 # Get results in correct order
+                 batch_results = []
+                 while not results_queue.empty():
+                     batch_results.append(results_queue.get())
+
+                 # Sort by index
+                 batch_results.sort(key=lambda x: x[0])
+
+                 # Add to processed frames
+                 processed_frame_paths.extend([result[1] for result in batch_results])
+
+         return processed_frame_paths
+
+     def _process_frame_thread(
+         self,
+         frame_path: str,
+         prompt: str,
+         negative_prompt: str,
+         strength: float,
+         guidance_scale: float,
+         use_controlnet: bool,
+         use_lora: bool,
+         index: int,
+         results_queue: queue.Queue,
+     ):
+         """
+         Thread function for processing a frame.
+
+         Args:
+             frame_path: Path to input frame
+             prompt: Text prompt for generation
+             negative_prompt: Negative text prompt
+             strength: Denoising strength (0-1)
+             guidance_scale: Classifier-free guidance scale
+             use_controlnet: Whether to use ControlNet
+             use_lora: Whether to use LoRA
+             index: Frame index
+             results_queue: Queue to store results
+         """
+         try:
+             processed_frame_path = self.comfyui_client.process_frame(
+                 frame_path=frame_path,
+                 prompt=prompt,
+                 negative_prompt=negative_prompt,
+                 strength=strength,
+                 guidance_scale=guidance_scale,
+                 use_controlnet=use_controlnet,
+                 use_lora=use_lora,
+             )
+             results_queue.put((index, processed_frame_path))
+         except Exception as e:
+             click.echo(click.style(f"Error processing frame {index}: {e}", fg="red"))
+             # Put original frame in queue to maintain sequence
+             results_queue.put((index, frame_path))
+
+
+ @click.group(name="model")
+ def model():
+     """Video-to-video generation workflow using ComfyUI and Hunyuan video models."""
+     pass
+
+
+ @model.command()
+ @click.option("--input", "-i", required=True, help="Path to input video file")
+ @click.option("--output", "-o", required=True, help="Path for output video file")
+ @click.option("--prompt", "-p", required=True, help="Text prompt for generation")
+ @click.option("--negative", "-n", default="", help="Negative text prompt")
+ @click.option("--fps", default=8, help="Frames per second to process", type=int)
+ @click.option("--strength", "-s", default=0.75, help="Denoising strength (0-1)", type=float)
+ @click.option("--guidance", "-g", default=7.5, help="Guidance scale", type=float)
+ @click.option("--batch", "-b", default=1, help="Batch size for parallel processing", type=int)
+ @click.option("--no-controlnet", is_flag=True, help="Disable ControlNet")
+ @click.option("--no-lora", is_flag=True, help="Disable LoRA")
+ @click.option("--no-temporal", is_flag=True, help="Disable temporal consistency")
+ @click.option("--model", "model_path", help="Override Hunyuan video model path", type=str)
+ @click.option("--lora-model", help="Override LoRA model path", type=str)
+ @click.option("--controlnet-model", help="Override ControlNet model path", type=str)
+ def generate(
+     input,
+     output,
+     prompt,
+     negative,
+     fps,
+     strength,
+     guidance,
+     batch,
+     no_controlnet,
+     no_lora,
+     no_temporal,
+     model_path,
+     lora_model,
+     controlnet_model,
+ ):
+     """Generate a video using video-to-video translation with AI models."""
+     # Override model paths if specified (the --model value is bound to model_path
+     # so it does not shadow the command group defined above)
+     if model_path:
+         CONFIG["hunyuan_video_model"] = model_path
+     if lora_model:
+         CONFIG["lora_model"] = lora_model
+     if controlnet_model:
+         CONFIG["controlnet_model"] = controlnet_model
+
+     # Create generator and process the video
+     generator = VideoToVideoGenerator()
+
+     # Display a header with a summary of what will be done
+     click.echo("\n" + "=" * 80)
+     click.echo(click.style("Video-to-Video Generation Workflow", fg="bright_blue", bold=True))
+     click.echo("=" * 80)
+
+     # Run the generation process
+     output_path = generator.generate(
+         input_video=input,
+         output_video=output,
+         prompt=prompt,
+         negative_prompt=negative,
+         fps=fps,
+         strength=strength,
+         guidance_scale=guidance,
+         use_controlnet=not no_controlnet,
+         use_lora=not no_lora,
+         use_temporal_consistency=not no_temporal,
+         batch_size=batch,
+     )
+
+     # Display completion message
+     click.echo("\n" + "=" * 80)
+     click.echo(click.style("✓ Generation Complete!", fg="bright_green", bold=True))
+     click.echo(f"Output saved to: {click.style(output_path, fg='bright_green', bold=True)}")
+     click.echo("=" * 80 + "\n")
+
+     return {"output_path": output_path, "status": "completed"}
+
+
+ @model.command()
+ @click.option("--config-file", "-c", default="v2v_config.json", help="Path to config file")
+ @click.option("--output", "-o", default="v2v_config.json", help="Output path for generated config")
+ def config(config_file, output):
+     """Generate or modify a configuration file for the workflow."""
+     if os.path.exists(config_file):
+         # Load and modify existing config
+         click.echo(f"Loading existing config from {config_file}...")
+         with open(config_file, "r") as f:
+             existing_config = json.load(f)
+
+         # Merge defaults with the existing config (values from the file take precedence)
+         merged_config = {**CONFIG, **existing_config}
+
+         # Let user modify values interactively
+         for key, value in merged_config.items():
+             new_value = click.prompt(f"{key}", default=value)
+             merged_config[key] = new_value
+
+         # Save updated config
+         with open(output, "w") as f:
+             json.dump(merged_config, f, indent=2)
+
+         click.echo(click.style(f"Updated config saved to {output}", fg="green"))
+     else:
+         # Create new config from current settings
+         click.echo("Creating new configuration file...")
+
+         # Let user set values interactively
+         updated_config = {}
+         for key, value in CONFIG.items():
+             new_value = click.prompt(f"{key}", default=value)
+             updated_config[key] = new_value
+
+         # Save new config
+         with open(output, "w") as f:
+             json.dump(updated_config, f, indent=2)
+
+         click.echo(click.style(f"Configuration file created at {output}", fg="green"))
+
+     return {"config_file": output, "status": "saved"}
+
+
+ @model.command()
+ @click.option("--path", "-p", default="./models", help="Path to search for models")
+ def list_models(path):
+     """List available models that can be used with the workflow."""
+     models = {"Video Models": [], "VAE Models": [], "LoRA Models": [], "ControlNet Models": []}
+
+     # Function to check if file is potentially a model
+     def is_model(file):
+         return file.endswith((".safetensors", ".ckpt", ".pt", ".bin"))
+
+     # Walk through directories and collect models
+     for root, dirs, files in os.walk(path):
+         for file in files:
+             if is_model(file):
+                 full_path = os.path.join(root, file)
+                 rel_path = os.path.relpath(full_path, ".")
+
+                 # Categorize models based on path or name
+                 if "diffusion_models" in root or "hunyuan_video" in file:
+                     models["Video Models"].append(rel_path)
+                 elif "vae" in root.lower() or "vae" in file.lower():
+                     models["VAE Models"].append(rel_path)
+                 elif "lora" in root.lower() or "lora" in file.lower():
+                     models["LoRA Models"].append(rel_path)
+                 elif "controlnet" in root.lower() or "control" in file.lower():
+                     models["ControlNet Models"].append(rel_path)
+                 # Otherwise, fall back to the video category when the name suggests one
+                 elif "video" in file.lower():
+                     models["Video Models"].append(rel_path)
+
+     # Display results
+     click.echo("\n" + "=" * 80)
+     click.echo(click.style("Available Models", fg="bright_blue", bold=True))
+     click.echo("=" * 80)
+
+     for category, model_list in models.items():
+         if model_list:
+             click.echo(click.style(f"\n{category}:", fg="green", bold=True))
+             for i, model_path in enumerate(model_list, 1):
+                 click.echo(f"{i}. {model_path}")
+         else:
+             click.echo(click.style(f"\n{category}: None found", fg="yellow"))
+
+     click.echo("\n" + "=" * 80)
+     click.echo(
+         "Usage: Specify any of these models with the respective options in the generate command"
+     )
+     click.echo("=" * 80 + "\n")
+
+     return {"models": models, "search_path": path}
+
+
+ @model.command()
+ def check_comfyui():
+     """Check if ComfyUI is running and if required nodes are available."""
+     click.echo("Checking ComfyUI connection...")
+
+     try:
+         # Try to connect to ComfyUI API
+         response = requests.get(f"{CONFIG['comfyui_api']}/object_info")
+
+         if response.status_code == 200:
+             click.echo(click.style("✓ ComfyUI is running!", fg="green"))
+
+             # Check for required node types
+             node_info = response.json()
+             required_nodes = [
+                 "HunyuanVideoModelLoader",
+                 "HunyuanVideoVAELoader",
+                 "HunyuanImagePreprocessor",
+                 "ControlNetLoader",
+                 "LoraLoader",
+             ]
+
+             missing_nodes = []
+             for node in required_nodes:
+                 if node not in node_info:
+                     missing_nodes.append(node)
+
+             if missing_nodes:
+                 click.echo(click.style("⚠ Warning: Some required nodes are missing:", fg="yellow"))
+                 for node in missing_nodes:
+                     click.echo(f"  - {node}")
+                 click.echo("\nYou may need to install additional custom nodes for ComfyUI:")
+                 click.echo("  1. ComfyUI-HunyuanVideoWrapper - For Hunyuan video processing")
+                 click.echo("  2. ComfyUI-ControlNet - For structure preservation")
+             else:
+                 click.echo(click.style("✓ All required nodes are available!", fg="green"))
+
+             # Check model availability
+             click.echo("\nChecking for required models...")
+             api_url = f"{CONFIG['comfyui_api']}/model_list"
+             response = requests.get(api_url)
+
+             if response.status_code == 200:
+                 models_info = response.json()
+
+                 # Check for hunyuan models
+                 hunyuan_found = False
+                 for model_type, model_names in models_info.items():
+                     for model_name in model_names:
+                         if "hunyuan" in model_name.lower():
+                             hunyuan_found = True
+                             click.echo(click.style(f"✓ Found Hunyuan model: {model_name}", fg="green"))
+
+                 if not hunyuan_found:
+                     click.echo(
+                         click.style("⚠ Warning: No Hunyuan models found in ComfyUI", fg="yellow")
+                     )
+                     click.echo(
+                         "  Make sure you have the Hunyuan video models installed in your ComfyUI setup"
+                     )
+             else:
+                 click.echo(click.style("⚠ Could not check model availability", fg="yellow"))
+
+         else:
+             click.echo(click.style("✗ Failed to connect to ComfyUI API", fg="red"))
+             click.echo(f"  Received status code: {response.status_code}")
+     except requests.exceptions.RequestException as e:
+         click.echo(click.style("✗ Could not connect to ComfyUI", fg="red"))
+         click.echo(f"  Error: {e}")
+         click.echo(f"\nPlease ensure that ComfyUI is running at {CONFIG['comfyui_api']}")
+         click.echo("You can start ComfyUI with: python main.py --listen 0.0.0.0 --port 8188")
+
+     return {"status": "checked", "comfyui_url": CONFIG["comfyui_api"]}
+
+
1043
+ if __name__ == "__main__":
1044
+ model()
1045
+ # This script is designed to be run as a command line tool
1046
+ # and should be executed in the context of a larger application.
1047
+ # It is not intended to be run as a standalone script.
1048
+ # The script uses the Click library for command line interface
1049
+ # and the ComfyUI API for video generation.
1050
+ # The script is structured to allow for easy modification and
1051
+ # extension, making it suitable for use in a variety of video
1052
+ # generation workflows.
1053
+ # The script is designed to be modular, with separate classes
1054
+ # for handling different aspects of the video generation process.
1055
+ # The main class, VideoToVideoGenerator, orchestrates the
1056
+ # entire workflow, from extracting frames to generating the
1057
+ # final video. The ComfyUIClient class handles communication
1058
+ # with the ComfyUI API, while the VideoProcessor class
1059
+ # manages video processing tasks such as frame extraction
1060
+ # and video encoding.
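
For reference, the command group above can be exercised through Click's test runner without a video file or a live ComfyUI server. The sketch below is illustrative, not part of the package: the import path mcli.app.model.model is inferred from the file list above, and the dashed subcommand names assume a Click release recent enough to map underscores in function names to dashes.

from click.testing import CliRunner

# Import path inferred from the wheel's RECORD; adjust if the module lives elsewhere.
from mcli.app.model.model import model

runner = CliRunner()

# Scans ./models for .safetensors/.ckpt/.pt/.bin files; prints "None found" per
# category when the directory is empty or missing.
result = runner.invoke(model, ["list-models", "--path", "./models"])
print(result.output)

# Reports a connection failure unless CONFIG["comfyui_api"] points at a running
# ComfyUI instance (it ships empty).
result = runner.invoke(model, ["check-comfyui"])
print(result.output)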