mcli_framework-7.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mcli-framework might be problematic.

Files changed (186)
  1. mcli/app/chat_cmd.py +42 -0
  2. mcli/app/commands_cmd.py +226 -0
  3. mcli/app/completion_cmd.py +216 -0
  4. mcli/app/completion_helpers.py +288 -0
  5. mcli/app/cron_test_cmd.py +697 -0
  6. mcli/app/logs_cmd.py +419 -0
  7. mcli/app/main.py +492 -0
  8. mcli/app/model/model.py +1060 -0
  9. mcli/app/model_cmd.py +227 -0
  10. mcli/app/redis_cmd.py +269 -0
  11. mcli/app/video/video.py +1114 -0
  12. mcli/app/visual_cmd.py +303 -0
  13. mcli/chat/chat.py +2409 -0
  14. mcli/chat/command_rag.py +514 -0
  15. mcli/chat/enhanced_chat.py +652 -0
  16. mcli/chat/system_controller.py +1010 -0
  17. mcli/chat/system_integration.py +1016 -0
  18. mcli/cli.py +25 -0
  19. mcli/config.toml +20 -0
  20. mcli/lib/api/api.py +586 -0
  21. mcli/lib/api/daemon_client.py +203 -0
  22. mcli/lib/api/daemon_client_local.py +44 -0
  23. mcli/lib/api/daemon_decorator.py +217 -0
  24. mcli/lib/api/mcli_decorators.py +1032 -0
  25. mcli/lib/auth/auth.py +85 -0
  26. mcli/lib/auth/aws_manager.py +85 -0
  27. mcli/lib/auth/azure_manager.py +91 -0
  28. mcli/lib/auth/credential_manager.py +192 -0
  29. mcli/lib/auth/gcp_manager.py +93 -0
  30. mcli/lib/auth/key_manager.py +117 -0
  31. mcli/lib/auth/mcli_manager.py +93 -0
  32. mcli/lib/auth/token_manager.py +75 -0
  33. mcli/lib/auth/token_util.py +1011 -0
  34. mcli/lib/config/config.py +47 -0
  35. mcli/lib/discovery/__init__.py +1 -0
  36. mcli/lib/discovery/command_discovery.py +274 -0
  37. mcli/lib/erd/erd.py +1345 -0
  38. mcli/lib/erd/generate_graph.py +453 -0
  39. mcli/lib/files/files.py +76 -0
  40. mcli/lib/fs/fs.py +109 -0
  41. mcli/lib/lib.py +29 -0
  42. mcli/lib/logger/logger.py +611 -0
  43. mcli/lib/performance/optimizer.py +409 -0
  44. mcli/lib/performance/rust_bridge.py +502 -0
  45. mcli/lib/performance/uvloop_config.py +154 -0
  46. mcli/lib/pickles/pickles.py +50 -0
  47. mcli/lib/search/cached_vectorizer.py +479 -0
  48. mcli/lib/services/data_pipeline.py +460 -0
  49. mcli/lib/services/lsh_client.py +441 -0
  50. mcli/lib/services/redis_service.py +387 -0
  51. mcli/lib/shell/shell.py +137 -0
  52. mcli/lib/toml/toml.py +33 -0
  53. mcli/lib/ui/styling.py +47 -0
  54. mcli/lib/ui/visual_effects.py +634 -0
  55. mcli/lib/watcher/watcher.py +185 -0
  56. mcli/ml/api/app.py +215 -0
  57. mcli/ml/api/middleware.py +224 -0
  58. mcli/ml/api/routers/admin_router.py +12 -0
  59. mcli/ml/api/routers/auth_router.py +244 -0
  60. mcli/ml/api/routers/backtest_router.py +12 -0
  61. mcli/ml/api/routers/data_router.py +12 -0
  62. mcli/ml/api/routers/model_router.py +302 -0
  63. mcli/ml/api/routers/monitoring_router.py +12 -0
  64. mcli/ml/api/routers/portfolio_router.py +12 -0
  65. mcli/ml/api/routers/prediction_router.py +267 -0
  66. mcli/ml/api/routers/trade_router.py +12 -0
  67. mcli/ml/api/routers/websocket_router.py +76 -0
  68. mcli/ml/api/schemas.py +64 -0
  69. mcli/ml/auth/auth_manager.py +425 -0
  70. mcli/ml/auth/models.py +154 -0
  71. mcli/ml/auth/permissions.py +302 -0
  72. mcli/ml/backtesting/backtest_engine.py +502 -0
  73. mcli/ml/backtesting/performance_metrics.py +393 -0
  74. mcli/ml/cache.py +400 -0
  75. mcli/ml/cli/main.py +398 -0
  76. mcli/ml/config/settings.py +394 -0
  77. mcli/ml/configs/dvc_config.py +230 -0
  78. mcli/ml/configs/mlflow_config.py +131 -0
  79. mcli/ml/configs/mlops_manager.py +293 -0
  80. mcli/ml/dashboard/app.py +532 -0
  81. mcli/ml/dashboard/app_integrated.py +738 -0
  82. mcli/ml/dashboard/app_supabase.py +560 -0
  83. mcli/ml/dashboard/app_training.py +615 -0
  84. mcli/ml/dashboard/cli.py +51 -0
  85. mcli/ml/data_ingestion/api_connectors.py +501 -0
  86. mcli/ml/data_ingestion/data_pipeline.py +567 -0
  87. mcli/ml/data_ingestion/stream_processor.py +512 -0
  88. mcli/ml/database/migrations/env.py +94 -0
  89. mcli/ml/database/models.py +667 -0
  90. mcli/ml/database/session.py +200 -0
  91. mcli/ml/experimentation/ab_testing.py +845 -0
  92. mcli/ml/features/ensemble_features.py +607 -0
  93. mcli/ml/features/political_features.py +676 -0
  94. mcli/ml/features/recommendation_engine.py +809 -0
  95. mcli/ml/features/stock_features.py +573 -0
  96. mcli/ml/features/test_feature_engineering.py +346 -0
  97. mcli/ml/logging.py +85 -0
  98. mcli/ml/mlops/data_versioning.py +518 -0
  99. mcli/ml/mlops/experiment_tracker.py +377 -0
  100. mcli/ml/mlops/model_serving.py +481 -0
  101. mcli/ml/mlops/pipeline_orchestrator.py +614 -0
  102. mcli/ml/models/base_models.py +324 -0
  103. mcli/ml/models/ensemble_models.py +675 -0
  104. mcli/ml/models/recommendation_models.py +474 -0
  105. mcli/ml/models/test_models.py +487 -0
  106. mcli/ml/monitoring/drift_detection.py +676 -0
  107. mcli/ml/monitoring/metrics.py +45 -0
  108. mcli/ml/optimization/portfolio_optimizer.py +834 -0
  109. mcli/ml/preprocessing/data_cleaners.py +451 -0
  110. mcli/ml/preprocessing/feature_extractors.py +491 -0
  111. mcli/ml/preprocessing/ml_pipeline.py +382 -0
  112. mcli/ml/preprocessing/politician_trading_preprocessor.py +569 -0
  113. mcli/ml/preprocessing/test_preprocessing.py +294 -0
  114. mcli/ml/scripts/populate_sample_data.py +200 -0
  115. mcli/ml/tasks.py +400 -0
  116. mcli/ml/tests/test_integration.py +429 -0
  117. mcli/ml/tests/test_training_dashboard.py +387 -0
  118. mcli/public/oi/oi.py +15 -0
  119. mcli/public/public.py +4 -0
  120. mcli/self/self_cmd.py +1246 -0
  121. mcli/workflow/daemon/api_daemon.py +800 -0
  122. mcli/workflow/daemon/async_command_database.py +681 -0
  123. mcli/workflow/daemon/async_process_manager.py +591 -0
  124. mcli/workflow/daemon/client.py +530 -0
  125. mcli/workflow/daemon/commands.py +1196 -0
  126. mcli/workflow/daemon/daemon.py +905 -0
  127. mcli/workflow/daemon/daemon_api.py +59 -0
  128. mcli/workflow/daemon/enhanced_daemon.py +571 -0
  129. mcli/workflow/daemon/process_cli.py +244 -0
  130. mcli/workflow/daemon/process_manager.py +439 -0
  131. mcli/workflow/daemon/test_daemon.py +275 -0
  132. mcli/workflow/dashboard/dashboard_cmd.py +113 -0
  133. mcli/workflow/docker/docker.py +0 -0
  134. mcli/workflow/file/file.py +100 -0
  135. mcli/workflow/gcloud/config.toml +21 -0
  136. mcli/workflow/gcloud/gcloud.py +58 -0
  137. mcli/workflow/git_commit/ai_service.py +328 -0
  138. mcli/workflow/git_commit/commands.py +430 -0
  139. mcli/workflow/lsh_integration.py +355 -0
  140. mcli/workflow/model_service/client.py +594 -0
  141. mcli/workflow/model_service/download_and_run_efficient_models.py +288 -0
  142. mcli/workflow/model_service/lightweight_embedder.py +397 -0
  143. mcli/workflow/model_service/lightweight_model_server.py +714 -0
  144. mcli/workflow/model_service/lightweight_test.py +241 -0
  145. mcli/workflow/model_service/model_service.py +1955 -0
  146. mcli/workflow/model_service/ollama_efficient_runner.py +425 -0
  147. mcli/workflow/model_service/pdf_processor.py +386 -0
  148. mcli/workflow/model_service/test_efficient_runner.py +234 -0
  149. mcli/workflow/model_service/test_example.py +315 -0
  150. mcli/workflow/model_service/test_integration.py +131 -0
  151. mcli/workflow/model_service/test_new_features.py +149 -0
  152. mcli/workflow/openai/openai.py +99 -0
  153. mcli/workflow/politician_trading/commands.py +1790 -0
  154. mcli/workflow/politician_trading/config.py +134 -0
  155. mcli/workflow/politician_trading/connectivity.py +490 -0
  156. mcli/workflow/politician_trading/data_sources.py +395 -0
  157. mcli/workflow/politician_trading/database.py +410 -0
  158. mcli/workflow/politician_trading/demo.py +248 -0
  159. mcli/workflow/politician_trading/models.py +165 -0
  160. mcli/workflow/politician_trading/monitoring.py +413 -0
  161. mcli/workflow/politician_trading/scrapers.py +966 -0
  162. mcli/workflow/politician_trading/scrapers_california.py +412 -0
  163. mcli/workflow/politician_trading/scrapers_eu.py +377 -0
  164. mcli/workflow/politician_trading/scrapers_uk.py +350 -0
  165. mcli/workflow/politician_trading/scrapers_us_states.py +438 -0
  166. mcli/workflow/politician_trading/supabase_functions.py +354 -0
  167. mcli/workflow/politician_trading/workflow.py +852 -0
  168. mcli/workflow/registry/registry.py +180 -0
  169. mcli/workflow/repo/repo.py +223 -0
  170. mcli/workflow/scheduler/commands.py +493 -0
  171. mcli/workflow/scheduler/cron_parser.py +238 -0
  172. mcli/workflow/scheduler/job.py +182 -0
  173. mcli/workflow/scheduler/monitor.py +139 -0
  174. mcli/workflow/scheduler/persistence.py +324 -0
  175. mcli/workflow/scheduler/scheduler.py +679 -0
  176. mcli/workflow/sync/sync_cmd.py +437 -0
  177. mcli/workflow/sync/test_cmd.py +314 -0
  178. mcli/workflow/videos/videos.py +242 -0
  179. mcli/workflow/wakatime/wakatime.py +11 -0
  180. mcli/workflow/workflow.py +37 -0
  181. mcli_framework-7.0.0.dist-info/METADATA +479 -0
  182. mcli_framework-7.0.0.dist-info/RECORD +186 -0
  183. mcli_framework-7.0.0.dist-info/WHEEL +5 -0
  184. mcli_framework-7.0.0.dist-info/entry_points.txt +7 -0
  185. mcli_framework-7.0.0.dist-info/licenses/LICENSE +21 -0
  186. mcli_framework-7.0.0.dist-info/top_level.txt +1 -0
mcli/app/video/video.py
@@ -0,0 +1,1114 @@
+ import base64
+ import json
+ import os
+ import queue
+ import shutil
+ import sys
+ import tempfile
+ import threading
+ import time
+ import uuid
+ from pathlib import Path
+ from typing import Any, Dict, List, Optional, Tuple
+
+ import click
+ import cv2
+ import numpy as np
+ import requests
+ from PIL import Image
+ from scipy import ndimage, spatial
+ from skimage import feature, morphology, restoration
+
+ # Add this to your existing CONFIG
+ CONFIG = {"temp_dir": "./temp", "output_dir": "./output"}
+
+
+ class VideoProcessor:
+     """Handles video processing operations including frame extraction and reconstruction."""
+
+     def __init__(self, temp_dir: str = CONFIG["temp_dir"]):
+         self.temp_dir = temp_dir
+         self.video_info: Dict[str, Any] = {}
+         os.makedirs(temp_dir, exist_ok=True)
+         os.makedirs(CONFIG["output_dir"], exist_ok=True)
+
+     def extract_frames(self, video_path: str, fps: int = 8) -> List[str]:
+         """
+         Extract frames from video at the specified FPS.
+
+         Args:
+             video_path: Path to input video
+             fps: Frames per second to extract
+
+         Returns:
+             List of paths to extracted frames
+         """
+         click.echo(click.style(f"Extracting frames from {video_path} at {fps} FPS...", fg="green"))
+
+         # Clean temp directory
+         # for file in os.listdir(self.temp_dir):
+         #     os.remove(os.path.join(self.temp_dir, file))
+
+         # Extract frames
+         video = cv2.VideoCapture(video_path)
+         video_fps = video.get(cv2.CAP_PROP_FPS)
+         frame_interval = max(1, int(video_fps / fps))  # guard against a zero interval when fps >= source FPS
+         frame_paths = []
+
+         frame_count = 0
+         frame_saved = 0
+
+         with click.progressbar(
+             length=int(video.get(cv2.CAP_PROP_FRAME_COUNT)), label="Extracting frames"
+         ) as bar:
+             while True:
+                 success, frame = video.read()
+                 if not success:
+                     break
+
+                 if frame_count % frame_interval == 0:
+                     frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+                     frame_path = os.path.join(self.temp_dir, f"frame_{frame_saved:05d}.png")
+                     Image.fromarray(frame_rgb).save(frame_path)
+                     frame_paths.append(frame_path)
+                     frame_saved += 1
+
+                 frame_count += 1
+                 bar.update(1)
+
+         # Save video info for reconstruction (query properties before releasing the capture)
+         self.video_info = {
+             "original_fps": video_fps,
+             "width": int(video.get(cv2.CAP_PROP_FRAME_WIDTH)),
+             "height": int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)),
+             "total_frames": frame_count,
+         }
+
+         video.release()
+         click.echo(f"Extracted {len(frame_paths)} frames.")
+
+         return frame_paths
+
+     def extract_motion_vectors(self, video_path: str) -> Dict[str, Any]:
+         """
+         Extract motion vectors from video for temporal consistency.
+         This is a simplified placeholder for actual motion vector extraction.
+
+         Args:
+             video_path: Path to input video
+
+         Returns:
+             Dictionary with motion vector data
+         """
+         # Placeholder for motion vector extraction
+         # In a complete implementation, this would use optical flow or
+         # dedicated motion vector extraction techniques
+         click.echo(click.style("Extracting motion vectors...", fg="blue"))
+
+         # Simple optical flow calculation between consecutive frames
+         video = cv2.VideoCapture(video_path)
+         ret, prev_frame = video.read()
+         prev_gray = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
+
+         motion_data = {}
+         frame_idx = 0
+
+         with click.progressbar(
+             length=int(video.get(cv2.CAP_PROP_FRAME_COUNT)) - 1, label="Analyzing motion"
+         ) as bar:
+             while True:
+                 ret, frame = video.read()
+                 if not ret:
+                     break
+
+                 curr_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+                 flow = cv2.calcOpticalFlowFarneback(
+                     prev_gray,
+                     curr_gray,
+                     None,
+                     pyr_scale=0.5,
+                     levels=3,
+                     winsize=15,
+                     iterations=3,
+                     poly_n=5,
+                     poly_sigma=1.2,
+                     flags=0,
+                 )
+
+                 # Store compressed flow data
+                 motion_data[f"frame_{frame_idx:05d}"] = {
+                     "mean_x": float(np.mean(flow[..., 0])),
+                     "mean_y": float(np.mean(flow[..., 1])),
+                     "std_x": float(np.std(flow[..., 0])),
+                     "std_y": float(np.std(flow[..., 1])),
+                 }
+
+                 prev_gray = curr_gray
+                 frame_idx += 1
+                 bar.update(1)
+
+         video.release()
+         click.echo("Motion analysis complete.")
+         return motion_data
+
+     def frames_to_video(
+         self, frame_paths: List[str], output_path: str, fps: Optional[float] = None
+     ) -> str:
+         """
+         Convert frames back to video.
+
+         Args:
+             frame_paths: List of paths to frames
+             output_path: Path for output video
+             fps: Frames per second (defaults to original video FPS)
+
+         Returns:
+             Path to output video
+         """
+         if not frame_paths:
+             raise ValueError("No frames provided")
+
+         if fps is None:
+             fps = self.video_info.get("original_fps", 30)
+
+         click.echo(
+             click.style(
+                 f"Converting {len(frame_paths)} frames to video at {fps} FPS...", fg="green"
+             )
+         )
+
+         # Get dimensions from first frame
+         first_frame = cv2.imread(frame_paths[0])
+         h, w, _ = first_frame.shape
+
+         # Initialize video writer
+         fourcc = cv2.VideoWriter_fourcc(*"mp4v")
+         video_writer = cv2.VideoWriter(output_path, fourcc, fps, (w, h))
+
+         # Add frames to video
+         with click.progressbar(frame_paths, label="Creating video") as bar:
+             for frame_path in bar:
+                 frame = cv2.imread(frame_path)
+                 video_writer.write(frame)
+
+         video_writer.release()
+         click.echo(click.style(f"Video saved to {output_path}", fg="bright_green"))
+
+         return output_path
+
+     def apply_temporal_consistency(
+         self, processed_frames: List[str], motion_data: Dict[str, Any]
+     ) -> List[str]:
+         """
+         Apply temporal consistency to processed frames using motion data.
+         This is a simplified implementation.
+
+         Args:
+             processed_frames: List of paths to processed frames
+             motion_data: Motion vector data from extract_motion_vectors
+
+         Returns:
+             List of paths to temporally consistent frames
+         """
+         click.echo(click.style("Applying temporal consistency...", fg="blue"))
+
+         if len(processed_frames) < 2:
+             return processed_frames
+
+         # Create temp directory for consistent frames
+         consistent_dir = os.path.join(self.temp_dir, "consistent")
+         os.makedirs(consistent_dir, exist_ok=True)
+
+         # Simple temporal consistency with weighted blending
+         consistent_frame_paths = []
+         prev_frame = None
+
+         with click.progressbar(
+             enumerate(processed_frames), length=len(processed_frames), label="Reducing flicker"
+         ) as bar:
+             for i, frame_path in bar:
+                 frame = np.array(Image.open(frame_path))
+
+                 if prev_frame is not None:
+                     # Simple blending with motion-aware weight
+                     if f"frame_{i-1:05d}" in motion_data:
+                         motion_info = motion_data[f"frame_{i-1:05d}"]
+                         # Calculate blending weight based on motion magnitude
+                         motion_magnitude = np.sqrt(
+                             motion_info["mean_x"] ** 2 + motion_info["mean_y"] ** 2
+                         )
+                         # Less blending when motion is high, more when motion is low
+                         blend_weight = max(0.1, min(0.3, 0.4 - motion_magnitude * 0.1))
+                     else:
+                         blend_weight = 0.2
+
+                     # Blend frames
+                     blended_frame = cv2.addWeighted(
+                         prev_frame, blend_weight, frame, 1.0 - blend_weight, 0
+                     )
+                     frame = blended_frame
+
+                 # Save consistent frame
+                 out_path = os.path.join(consistent_dir, f"consistent_{i:05d}.png")
+                 Image.fromarray(frame.astype(np.uint8)).save(out_path)
+                 consistent_frame_paths.append(out_path)
+
+                 prev_frame = frame
+
+         return consistent_frame_paths
+
+
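# Editorial usage sketch (not part of the package): the minimal VideoProcessor
# round trip defined above; the clip path "input.mp4" is hypothetical.
vp = VideoProcessor(temp_dir="./temp")
frames = vp.extract_frames("input.mp4", fps=8)        # sampled PNGs land in temp_dir
vp.frames_to_video(frames, "./output/rebuilt.mp4")    # re-encoded at the source FPS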
+ class OverlayRemover:
+     """Handles detection and removal of colored overlays from video frames."""
+
+     def __init__(self):
+         # Green overlay detection parameters (adjust based on your specific green)
+         self.green_lower = np.array([40, 100, 100])  # HSV lower bound
+         self.green_upper = np.array([80, 255, 255])  # HSV upper bound
+
+         # Alternative RGB-based detection for bright green
+         self.green_rgb_lower = np.array([0, 200, 0])
+         self.green_rgb_upper = np.array([100, 255, 100])
+
+     def detect_green_overlay(self, frame: np.ndarray, method: str = "hsv") -> np.ndarray:
+         """
+         Detect green overlay areas in a frame.
+
+         Args:
+             frame: Input frame as numpy array (RGB)
+             method: Detection method ("hsv" or "rgb")
+
+         Returns:
+             Binary mask where overlay areas are white (255)
+         """
+         if method == "hsv":
+             # Convert to HSV for better color detection
+             hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
+             mask = cv2.inRange(hsv, self.green_lower, self.green_upper)
+         else:
+             # RGB-based detection
+             mask = cv2.inRange(frame, self.green_rgb_lower, self.green_rgb_upper)
+
+         # Morphological operations to clean up the mask
+         kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
+         mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
+         mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
+
+         # Remove small noise
+         mask = (
+             morphology.remove_small_objects(mask.astype(bool), min_size=50).astype(np.uint8) * 255
+         )
+
+         return mask
+
+     def create_inpainting_mask(self, overlay_mask: np.ndarray, dilate_size: int = 3) -> np.ndarray:
+         """
+         Create an inpainting mask with slight dilation to ensure complete overlay removal.
+
+         Args:
+             overlay_mask: Binary mask of overlay areas
+             dilate_size: Size of dilation kernel
+
+         Returns:
+             Dilated mask for inpainting
+         """
+         kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (dilate_size, dilate_size))
+         inpaint_mask = cv2.dilate(overlay_mask, kernel, iterations=1)
+         return inpaint_mask
+
+     def inpaint_frame(
+         self, frame: np.ndarray, mask: np.ndarray, method: str = "telea"
+     ) -> np.ndarray:
+         """
+         Inpaint frame areas marked by mask.
+
+         Args:
+             frame: Input frame (RGB)
+             mask: Binary mask marking areas to inpaint
+             method: Inpainting method ("telea" or "navier_stokes")
+
+         Returns:
+             Inpainted frame
+         """
+         # Convert to BGR for OpenCV
+         frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
+
+         # Choose inpainting algorithm
+         if method == "telea":
+             inpainted_bgr = cv2.inpaint(frame_bgr, mask, 3, cv2.INPAINT_TELEA)
+         else:
+             inpainted_bgr = cv2.inpaint(frame_bgr, mask, 3, cv2.INPAINT_NS)
+
+         # Convert back to RGB
+         inpainted_rgb = cv2.cvtColor(inpainted_bgr, cv2.COLOR_BGR2RGB)
+         return inpainted_rgb
+
+     def remove_overlay_from_frame(
+         self, frame: np.ndarray, debug: bool = False
+     ) -> Tuple[np.ndarray, np.ndarray]:
+         """
+         Complete overlay removal pipeline for a single frame.
+
+         Args:
+             frame: Input frame (RGB)
+             debug: If True, return debug information
+
+         Returns:
+             Tuple of (cleaned_frame, overlay_mask)
+         """
+         # Try HSV detection first
+         mask_hsv = self.detect_green_overlay(frame, "hsv")
+
+         # If HSV doesn't find much, try RGB
+         if np.sum(mask_hsv) < 1000:  # Threshold for "not much found"
+             mask_rgb = self.detect_green_overlay(frame, "rgb")
+             overlay_mask = mask_rgb if np.sum(mask_rgb) > np.sum(mask_hsv) else mask_hsv
+         else:
+             overlay_mask = mask_hsv
+
+         # Create inpainting mask
+         inpaint_mask = self.create_inpainting_mask(overlay_mask)
+
+         # Inpaint the frame
+         cleaned_frame = self.inpaint_frame(frame, inpaint_mask, "telea")
+
+         return cleaned_frame, overlay_mask
+
+
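# Editorial usage sketch (not part of the package): OverlayRemover on a single
# still; "frame.png" is a hypothetical RGB image with a bright-green overlay.
remover = OverlayRemover()
still = np.array(Image.open("frame.png").convert("RGB"))
cleaned, mask = remover.remove_overlay_from_frame(still)
Image.fromarray(cleaned).save("frame_cleaned.png")    # inpainted result
Image.fromarray(mask).save("frame_mask.png")          # detected overlay regions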
+ class EnhancedVideoProcessor(VideoProcessor):
+     """Enhanced video processor with overlay removal capabilities."""
+
+     def __init__(self, temp_dir: str = CONFIG["temp_dir"]):
+         super().__init__(temp_dir)
+         self.overlay_remover = OverlayRemover()
+
+         # Create subdirectories for different processing stages
+         self.masks_dir = os.path.join(temp_dir, "masks")
+         self.cleaned_dir = os.path.join(temp_dir, "cleaned")
+         os.makedirs(self.masks_dir, exist_ok=True)
+         os.makedirs(self.cleaned_dir, exist_ok=True)
+
+     def process_frames_remove_overlay(
+         self, frame_paths: List[str], save_debug: bool = False
+     ) -> List[str]:
+         """
+         Process all frames to remove green overlays.
+
+         Args:
+             frame_paths: List of paths to input frames
+             save_debug: Whether to save debug masks
+
+         Returns:
+             List of paths to cleaned frames
+         """
+         click.echo(click.style("Removing green overlays from frames...", fg="cyan"))
+
+         cleaned_frame_paths = []
+
+         with click.progressbar(
+             enumerate(frame_paths), length=len(frame_paths), label="Removing overlays"
+         ) as bar:
+             for i, frame_path in bar:
+                 # Load frame
+                 frame = np.array(Image.open(frame_path))
+
+                 # Remove overlay
+                 cleaned_frame, overlay_mask = self.overlay_remover.remove_overlay_from_frame(frame)
+
+                 # Save cleaned frame
+                 cleaned_path = os.path.join(self.cleaned_dir, f"cleaned_{i:05d}.png")
+                 Image.fromarray(cleaned_frame).save(cleaned_path)
+                 cleaned_frame_paths.append(cleaned_path)
+
+                 # Save debug mask if requested
+                 if save_debug:
+                     mask_path = os.path.join(self.masks_dir, f"mask_{i:05d}.png")
+                     Image.fromarray(overlay_mask).save(mask_path)
+
+         click.echo(f"Processed {len(cleaned_frame_paths)} frames.")
+         return cleaned_frame_paths
+
+     def analyze_overlay_consistency(self, frame_paths: List[str]) -> Dict[str, Any]:
+         """
+         Analyze overlay patterns across frames for better temporal consistency.
+
+         Args:
+             frame_paths: List of frame paths
+
+         Returns:
+             Dictionary with overlay analysis data
+         """
+         click.echo(click.style("Analyzing overlay patterns...", fg="blue"))
+
+         overlay_stats = {}
+
+         for i, frame_path in enumerate(
+             frame_paths[: min(50, len(frame_paths))]
+         ):  # Sample first 50 frames
+             frame = np.array(Image.open(frame_path))
+             mask = self.overlay_remover.detect_green_overlay(frame, "hsv")
+
+             overlay_stats[f"frame_{i:05d}"] = {
+                 "overlay_area": int(np.sum(mask > 0)),
+                 "overlay_density": float(np.sum(mask > 0) / (mask.shape[0] * mask.shape[1])),
+                 "overlay_centroid": self._calculate_centroid(mask),
+             }
+
+         return overlay_stats
+
+     def _calculate_centroid(self, mask: np.ndarray) -> Tuple[float, float]:
+         """Calculate centroid of overlay mask."""
+         if np.sum(mask) == 0:
+             return (0.0, 0.0)
+
+         moments = cv2.moments(mask)
+         if moments["m00"] == 0:
+             return (0.0, 0.0)
+
+         cx = moments["m10"] / moments["m00"]
+         cy = moments["m01"] / moments["m00"]
+         return (float(cx), float(cy))
+
+     def remove_overlay_from_video(
+         self,
+         video_path: str,
+         output_path: Optional[str] = None,
+         fps: int = 8,
+         apply_temporal_smoothing: bool = True,
+     ) -> str:
+         """
+         Complete pipeline to remove overlays from video.
+
+         Args:
+             video_path: Input video path
+             output_path: Output video path (auto-generated if None)
+             fps: Frame extraction rate
+             apply_temporal_smoothing: Whether to apply temporal consistency
+
+         Returns:
+             Path to output video
+         """
+         if output_path is None:
+             base_name = os.path.splitext(os.path.basename(video_path))[0]
+             output_path = os.path.join(CONFIG["output_dir"], f"{base_name}_cleaned.mp4")
+
+         try:
+             # Step 1: Extract frames
+             frame_paths = self.extract_frames(video_path, fps)
+
+             # Step 2: Extract motion vectors for temporal consistency
+             motion_data = (
+                 self.extract_motion_vectors(video_path) if apply_temporal_smoothing else None
+             )
+
+             # Step 3: Remove overlays from frames
+             cleaned_frame_paths = self.process_frames_remove_overlay(frame_paths, save_debug=True)
+
+             # Step 4: Apply temporal consistency if requested
+             if apply_temporal_smoothing and motion_data:
+                 final_frame_paths = self.apply_temporal_consistency(
+                     cleaned_frame_paths, motion_data
+                 )
+             else:
+                 final_frame_paths = cleaned_frame_paths
+
+             # Step 5: Convert back to video
+             output_video = self.frames_to_video(final_frame_paths, output_path)
+
+             click.echo(
+                 click.style(
+                     f"✅ Overlay removal complete! Output: {output_video}", fg="bright_green"
+                 )
+             )
+             return output_video
+
+         except Exception as e:
+             click.echo(click.style(f"❌ Error during processing: {str(e)}", fg="red"))
+             raise
+
+
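# Editorial usage sketch (not part of the package): the basic end-to-end
# pipeline above; "capture.mp4" is a hypothetical screen recording.
basic = EnhancedVideoProcessor()
basic.remove_overlay_from_video("capture.mp4", fps=8, apply_temporal_smoothing=True)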
+ class AdvancedOverlayRemover:
+     """Advanced overlay removal with intelligent content reconstruction."""
+
+     def __init__(self):
+         # Green overlay detection parameters
+         self.green_lower = np.array([40, 100, 100])  # HSV lower bound
+         self.green_upper = np.array([80, 255, 255])  # HSV upper bound
+
+         # Alternative RGB-based detection for bright green
+         self.green_rgb_lower = np.array([0, 180, 0])
+         self.green_rgb_upper = np.array([120, 255, 120])
+
+         # Patch matching parameters
+         self.patch_size = 9
+         self.search_radius = 50
+         self.coherence_threshold = 0.85
+
+     def detect_green_overlay(self, frame: np.ndarray, method: str = "combined") -> np.ndarray:
+         """Enhanced green overlay detection with multiple methods."""
+         if method == "combined":
+             # HSV detection
+             hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
+             mask_hsv = cv2.inRange(hsv, self.green_lower, self.green_upper)
+
+             # RGB detection
+             mask_rgb = cv2.inRange(frame, self.green_rgb_lower, self.green_rgb_upper)
+
+             # Combine masks
+             mask = cv2.bitwise_or(mask_hsv, mask_rgb)
+
+             # Advanced saturation-based detection for bright colors
+             saturation = hsv[:, :, 1]
+             value = hsv[:, :, 2]
+             bright_saturated = (saturation > 150) & (value > 150)
+
+             # Green hue detection
+             hue = hsv[:, :, 0]
+             green_hue = (hue >= 40) & (hue <= 80)
+
+             # Combine all conditions
+             advanced_mask = bright_saturated & green_hue
+             mask = cv2.bitwise_or(mask, advanced_mask.astype(np.uint8) * 255)
+         else:
+             # Use a single method as before
+             if method == "hsv":
+                 hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
+                 mask = cv2.inRange(hsv, self.green_lower, self.green_upper)
+             else:
+                 mask = cv2.inRange(frame, self.green_rgb_lower, self.green_rgb_upper)
+
+         # Enhanced morphological operations
+         kernel_small = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
+         kernel_medium = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
+
+         # Close small gaps
+         mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel_medium)
+         # Remove noise
+         mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel_small)
+
+         # Remove small objects and fill holes
+         mask = (
+             morphology.remove_small_objects(mask.astype(bool), min_size=30).astype(np.uint8) * 255
+         )
+         mask = (
+             morphology.remove_small_holes(mask.astype(bool), area_threshold=100).astype(np.uint8)
+             * 255
+         )
+
+         return mask
+
+     def find_similar_patches(
+         self, frame: np.ndarray, mask: np.ndarray, target_point: Tuple[int, int]
+     ) -> List[np.ndarray]:
+         """Find similar patches in the frame for exemplar-based inpainting."""
+         y, x = target_point
+         h, w = frame.shape[:2]
+
+         # Extract target patch (known pixels around the hole)
+         half_patch = self.patch_size // 2
+         y_start, y_end = max(0, y - half_patch), min(h, y + half_patch + 1)
+         x_start, x_end = max(0, x - half_patch), min(w, x + half_patch + 1)
+
+         target_patch = frame[y_start:y_end, x_start:x_end]
+         target_mask = mask[y_start:y_end, x_start:x_end]
+
+         best_patches = []
+         best_scores = []
+
+         # Search for similar patches in valid areas
+         search_y_start = max(0, y - self.search_radius)
+         search_y_end = min(h - self.patch_size, y + self.search_radius)
+         search_x_start = max(0, x - self.search_radius)
+         search_x_end = min(w - self.patch_size, x + self.search_radius)
+
+         for sy in range(search_y_start, search_y_end, 2):  # Step by 2 for speed
+             for sx in range(search_x_start, search_x_end, 2):
+                 candidate_patch = frame[sy : sy + self.patch_size, sx : sx + self.patch_size]
+                 candidate_mask = mask[sy : sy + self.patch_size, sx : sx + self.patch_size]
+
+                 # Skip if candidate area has overlay
+                 if np.any(candidate_mask > 0):
+                     continue
+
+                 # Calculate similarity only on known pixels
+                 known_pixels = target_mask == 0
+                 if np.sum(known_pixels) < self.patch_size:  # Need enough known pixels
+                     continue
+
+                 # Skip candidates whose shape does not match the target patch
+                 if candidate_patch.shape != target_patch.shape:
+                     continue
+
+                 # Calculate normalized cross-correlation on known pixels
+                 target_known = target_patch[known_pixels]
+                 candidate_known = candidate_patch[known_pixels]
+
+                 if len(target_known) > 0:
+                     correlation = np.corrcoef(target_known.flatten(), candidate_known.flatten())[
+                         0, 1
+                     ]
+                     if not np.isnan(correlation):
+                         best_patches.append(candidate_patch)
+                         best_scores.append(correlation)
+
+         # Return the best matching patches
+         if best_patches:
+             best_indices = np.argsort(best_scores)[-3:]  # Top 3 matches
+             return [best_patches[i] for i in best_indices]
+         else:
+             return []
+
+     def exemplar_based_inpainting(self, frame: np.ndarray, mask: np.ndarray) -> np.ndarray:
+         """Advanced exemplar-based inpainting for intelligent content reconstruction."""
+         result = frame.copy()
+         mask_to_fill = mask.copy()
+
+         # Find boundary of the region to fill
+         contours, _ = cv2.findContours(mask_to_fill, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+
+         for contour in contours:
+             # Get bounding box
+             x, y, w, h = cv2.boundingRect(contour)
+
+             # Process region in patches
+             for py in range(y, y + h, self.patch_size // 2):
+                 for px in range(x, x + w, self.patch_size // 2):
+                     if mask_to_fill[py, px] > 0:  # Need to fill this pixel
+                         # Find similar patches
+                         similar_patches = self.find_similar_patches(frame, mask, (py, px))
+
+                         if similar_patches:
+                             # Blend the best patches
+                             blended_patch = np.mean(similar_patches, axis=0).astype(np.uint8)
+
+                             # Apply to result
+                             patch_h, patch_w = blended_patch.shape[:2]
+                             y_end = min(result.shape[0], py + patch_h)
+                             x_end = min(result.shape[1], px + patch_w)
+
+                             # Only fill masked areas
+                             patch_mask = mask_to_fill[py:y_end, px:x_end]
+                             mask_indices = patch_mask > 0
+
+                             if np.any(mask_indices):
+                                 result[py:y_end, px:x_end][mask_indices] = blended_patch[
+                                     : y_end - py, : x_end - px
+                                 ][mask_indices]
+
+         return result
+
+     def temporal_reference_inpainting(
+         self,
+         current_frame: np.ndarray,
+         reference_frames: List[np.ndarray],
+         mask: np.ndarray,
+         motion_vectors: Optional[Dict] = None,
+     ) -> np.ndarray:
+         """Use multiple reference frames for better content reconstruction."""
+         if not reference_frames:
+             return self.exemplar_based_inpainting(current_frame, mask)
+
+         result = current_frame.copy()
+
+         # For high-speed video, be selective about reference frames:
+         # use frames that are more likely to have similar content.
+
+         # Simple approach: weighted average of valid pixels from reference frames
+         valid_pixels = np.zeros_like(current_frame, dtype=np.float32)
+         weight_sum = np.zeros(mask.shape, dtype=np.float32)
+
+         for i, ref_frame in enumerate(reference_frames):
+             if ref_frame.shape == current_frame.shape:
+                 # Weight by frame distance (closer frames get higher weight)
+                 weight = 1.0 / (i + 1)
+
+                 # Only use pixels that aren't masked in the current frame
+                 ref_mask = self.detect_green_overlay(ref_frame, "combined")
+                 valid_mask = (ref_mask == 0) & (mask > 0)  # Valid in ref, needs filling in current
+
+                 if np.any(valid_mask):
+                     valid_pixels[valid_mask] += ref_frame[valid_mask].astype(np.float32) * weight
+                     weight_sum[valid_mask] += weight
+
+         # Apply weighted average where we have valid data
+         fill_mask = weight_sum > 0
+         if np.any(fill_mask):
+             # Expand weight_sum to match the color channels before broadcasting
+             weight_sum_expanded = weight_sum[:, :, np.newaxis]  # Add channel dimension
+             result[fill_mask] = (valid_pixels[fill_mask] / weight_sum_expanded[fill_mask]).astype(
+                 np.uint8
+             )
+
+         # For remaining unfilled areas, use exemplar-based inpainting
+         remaining_mask = mask & ((~fill_mask).astype(np.uint8) * 255)
+         if np.any(remaining_mask > 0):
+             result = self.exemplar_based_inpainting(result, remaining_mask)
+
+         return result
+
+     def content_aware_fill(self, frame: np.ndarray, mask: np.ndarray) -> np.ndarray:
+         """Advanced content-aware fill using multiple techniques."""
+         # Step 1: Try edge-preserving inpainting for smooth areas
+         frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
+
+         # Use the Fast Marching Method for smooth regions
+         inpainted_fmm = cv2.inpaint(frame_bgr, mask, 3, cv2.INPAINT_TELEA)
+
+         # Use Navier-Stokes for textured regions
+         inpainted_ns = cv2.inpaint(frame_bgr, mask, 3, cv2.INPAINT_NS)
+
+         # Detect textured vs smooth regions
+         gray = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2GRAY)
+
+         # Calculate local variance to detect texture
+         kernel = np.ones((9, 9), np.float32) / 81
+         local_mean = cv2.filter2D(gray.astype(np.float32), -1, kernel)
+         local_variance = cv2.filter2D((gray.astype(np.float32) - local_mean) ** 2, -1, kernel)
+
+         # Create texture mask
+         texture_threshold = np.mean(local_variance) * 1.5
+         texture_mask = (local_variance > texture_threshold).astype(np.float32)
+
+         # Smooth the transition
+         texture_mask = cv2.GaussianBlur(texture_mask, (15, 15), 5)
+
+         # Blend based on texture
+         result_bgr = inpainted_ns.astype(np.float32) * texture_mask[
+             :, :, np.newaxis
+         ] + inpainted_fmm.astype(np.float32) * (1 - texture_mask[:, :, np.newaxis])
+
+         result_rgb = cv2.cvtColor(result_bgr.astype(np.uint8), cv2.COLOR_BGR2RGB)
+
+         # Step 2: Apply exemplar-based inpainting for better results
+         final_result = self.exemplar_based_inpainting(result_rgb, mask)
+
+         return final_result
+
+     def remove_overlay_with_context(
+         self, frame: np.ndarray, reference_frames: Optional[List[np.ndarray]] = None
+     ) -> Tuple[np.ndarray, np.ndarray]:
+         """
+         Remove overlay with intelligent content reconstruction using context.
+
+         Args:
+             frame: Current frame
+             reference_frames: List of reference frames for temporal consistency
+
+         Returns:
+             Tuple of (cleaned_frame, overlay_mask)
+         """
+         # Detect overlay
+         overlay_mask = self.detect_green_overlay(frame, "combined")
+
+         if np.sum(overlay_mask) == 0:
+             return frame, overlay_mask
+
+         # Use temporal references if available and the list is small
+         if reference_frames and len(reference_frames) <= 5:
+             cleaned_frame = self.temporal_reference_inpainting(
+                 frame, reference_frames, overlay_mask
+             )
+         else:
+             # Use content-aware fill for single-frame processing
+             cleaned_frame = self.content_aware_fill(frame, overlay_mask)
+
+         return cleaned_frame, overlay_mask
+
+
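# Editorial usage sketch (not part of the package): AdvancedOverlayRemover with
# temporal references; the frame paths are hypothetical. Neighbouring frames act
# as reference material for temporal_reference_inpainting.
advanced = AdvancedOverlayRemover()
current = np.array(Image.open("frame_00010.png"))
refs = [np.array(Image.open(p)) for p in ("frame_00009.png", "frame_00011.png")]
cleaned, mask = advanced.remove_overlay_with_context(current, refs)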
+ class IntelligentVideoProcessor(VideoProcessor):
+     """Enhanced processor with intelligent overlay removal capabilities."""
+
+     def __init__(self, temp_dir: str = CONFIG["temp_dir"]):
+         super().__init__(temp_dir)
+         self.overlay_remover = AdvancedOverlayRemover()
+
+         # Create processing directories
+         self.masks_dir = os.path.join(temp_dir, "masks")
+         self.cleaned_dir = os.path.join(temp_dir, "cleaned")
+         self.reference_dir = os.path.join(temp_dir, "references")
+         os.makedirs(self.masks_dir, exist_ok=True)
+         os.makedirs(self.cleaned_dir, exist_ok=True)
+         os.makedirs(self.reference_dir, exist_ok=True)
+
+     def extract_motion_vectors_highspeed(
+         self, video_path: str, sample_rate: int = 10
+     ) -> Dict[str, Any]:
+         """
+         Optimized motion vector extraction for high-speed videos.
+         Samples every Nth frame to reduce computation while maintaining accuracy.
+         """
+         click.echo(click.style("Extracting motion vectors (high-speed optimized)...", fg="blue"))
+
+         video = cv2.VideoCapture(video_path)
+         ret, prev_frame = video.read()
+         prev_gray = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
+
+         motion_data = {}
+         frame_idx = 0
+         sample_count = 0
+
+         total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
+
+         with click.progressbar(
+             length=total_frames // sample_rate, label="Analyzing motion (sampled)"
+         ) as bar:
+             while True:
+                 ret, frame = video.read()
+                 if not ret:
+                     break
+
+                 # Sample every Nth frame for high-speed videos
+                 if frame_idx % sample_rate == 0:
+                     curr_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+
+                     # Use sparse optical flow for better performance
+                     corners = cv2.goodFeaturesToTrack(
+                         prev_gray, maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7
+                     )
+
+                     if corners is not None:
+                         # Calculate optical flow
+                         next_corners, status, error = cv2.calcOpticalFlowPyrLK(
+                             prev_gray, curr_gray, corners, None
+                         )
+
+                         # Filter good points
+                         good_old = corners[status == 1]
+                         good_new = next_corners[status == 1]
+
+                         if len(good_old) > 0:
+                             # Calculate motion statistics
+                             motion_vectors = good_new - good_old
+                             motion_data[f"frame_{frame_idx:05d}"] = {
+                                 "mean_x": float(np.mean(motion_vectors[:, 0])),
+                                 "mean_y": float(np.mean(motion_vectors[:, 1])),
+                                 "std_x": float(np.std(motion_vectors[:, 0])),
+                                 "std_y": float(np.std(motion_vectors[:, 1])),
+                                 "magnitude": float(np.mean(np.linalg.norm(motion_vectors, axis=1))),
+                                 "sample_points": len(good_old),
+                             }
+
+                     prev_gray = curr_gray
+                     sample_count += 1
+                     bar.update(1)
+
+                 frame_idx += 1
+
+         video.release()
+         click.echo(f"Motion analysis complete. Sampled {sample_count} frames.")
+         return motion_data
+
+     def process_with_temporal_context(
+         self, frame_paths: List[str], context_window: int = 3
+     ) -> List[str]:
+         """
+         Process frames with temporal context for better reconstruction.
+
+         Args:
+             frame_paths: List of frame paths
+             context_window: Number of frames to use as reference (on each side)
+
+         Returns:
+             List of cleaned frame paths
+         """
+         click.echo(click.style("Processing with temporal context...", fg="cyan"))
+
+         cleaned_frame_paths = []
+
+         # Pre-load some frames for context
+         frame_cache = {}
+
+         with click.progressbar(
+             enumerate(frame_paths), length=len(frame_paths), label="Intelligent overlay removal"
+         ) as bar:
+             for i, frame_path in bar:
+                 # Load current frame
+                 current_frame = np.array(Image.open(frame_path))
+
+                 # Collect reference frames
+                 reference_frames = []
+
+                 # Look backwards and forwards for reference frames
+                 for offset in range(-context_window, context_window + 1):
+                     ref_idx = i + offset
+                     if ref_idx != i and 0 <= ref_idx < len(frame_paths):
+                         ref_path = frame_paths[ref_idx]
+
+                         # Use the cache to avoid reloading
+                         if ref_path not in frame_cache:
+                             frame_cache[ref_path] = np.array(Image.open(ref_path))
+
+                         reference_frames.append(frame_cache[ref_path])
+
+                 # Remove overlay with context
+                 cleaned_frame, overlay_mask = self.overlay_remover.remove_overlay_with_context(
+                     current_frame, reference_frames
+                 )
+
+                 # Save results
+                 cleaned_path = os.path.join(self.cleaned_dir, f"cleaned_{i:05d}.png")
+                 Image.fromarray(cleaned_frame).save(cleaned_path)
+                 cleaned_frame_paths.append(cleaned_path)
+
+                 # Save mask for debugging
+                 mask_path = os.path.join(self.masks_dir, f"mask_{i:05d}.png")
+                 Image.fromarray(overlay_mask).save(mask_path)
+
+                 # Manage cache size (keep only recent frames)
+                 if len(frame_cache) > context_window * 4:
+                     oldest_key = next(iter(frame_cache))
+                     del frame_cache[oldest_key]
+
+         click.echo(f"Processed {len(cleaned_frame_paths)} frames with temporal context.")
+         return cleaned_frame_paths
+
+     def remove_overlay_from_video_intelligent(
+         self,
+         video_path: str,
+         output_path: Optional[str] = None,
+         fps: int = 30,
+         context_window: int = 3,
+     ) -> str:
+         """
+         Complete pipeline for intelligent overlay removal while maintaining original video speed.
+
+         Args:
+             video_path: Input video path
+             output_path: Output video path
+             fps: Frame extraction rate (higher = better quality, slower processing)
+             context_window: Temporal context window size
+
+         Returns:
+             Path to output video
+         """
+         if output_path is None:
+             base_name = os.path.splitext(os.path.basename(video_path))[0]
+             output_path = os.path.join(CONFIG["output_dir"], f"{base_name}_intelligent_cleaned.mp4")
+
+         try:
+             click.echo(click.style("🚀 Starting intelligent overlay removal...", fg="bright_cyan"))
+             click.echo(
+                 click.style(
+                     f"📊 Extraction FPS: {fps} (higher = better quality, slower processing)",
+                     fg="blue",
+                 )
+             )
+
+             # Step 1: Extract frames
+             frame_paths = self.extract_frames(video_path, fps)
+
+             # Step 2: Process with temporal context for intelligent reconstruction
+             cleaned_frame_paths = self.process_with_temporal_context(frame_paths, context_window)
+
+             # Step 3: Extract motion data (sampled for performance)
+             motion_data = self.extract_motion_vectors_highspeed(
+                 video_path, sample_rate=max(1, fps // 6)
+             )
+
+             # Step 4: Apply light temporal smoothing
+             if motion_data:
+                 final_frame_paths = self.apply_temporal_consistency(
+                     cleaned_frame_paths, motion_data
+                 )
+             else:
+                 final_frame_paths = cleaned_frame_paths
+
+             # Step 5: Reconstruct video at the extraction FPS to maintain original speed
+             output_video = self.frames_to_video(final_frame_paths, output_path)
+
+             click.echo(
+                 click.style(
+                     f"✅ Intelligent overlay removal complete! Output: {output_video}",
+                     fg="bright_green",
+                 )
+             )
+             click.echo(
+                 click.style(
+                     f"⏱️ Original speed maintained using {fps} FPS",
+                     fg="green",
+                 )
+             )
+
+             return output_video
+
+         except Exception as e:
+             click.echo(click.style(f"❌ Error during processing: {str(e)}", fg="red"))
+             raise
+
+
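# Editorial usage sketch (not part of the package): the intelligent pipeline with
# a 3-frame context window; "capture.mp4" is a hypothetical input.
smart = IntelligentVideoProcessor()
smart.remove_overlay_from_video_intelligent("capture.mp4", fps=30, context_window=3)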
+ # Enhanced command line interface
+ @click.command()
+ @click.argument("input_video", type=click.Path(exists=True))
+ @click.option("--output", "-o", type=click.Path(), help="Output video path")
+ @click.option(
+     "--fps",
+     "-f",
+     default=30,
+     help="Frame extraction rate (default: 30). Higher values = better quality but slower processing",
+ )
+ @click.option(
+     "--output-fps",
+     type=float,
+     help="Output video FPS (defaults to extraction FPS for correct speed)",
+ )
+ @click.option("--context", "-c", default=3, help="Temporal context window size (default: 3)")
+ @click.option(
+     "--method",
+     type=click.Choice(["intelligent", "basic"]),
+     default="intelligent",
+     help="Processing method (default: intelligent)",
+ )
+ def remove_overlay(input_video, output, fps, output_fps, context, method):
+     """Remove overlays from videos with intelligent content reconstruction."""
+
+     if method == "intelligent":
+         processor = IntelligentVideoProcessor()
+
+         # Process video
+         result = processor.remove_overlay_from_video_intelligent(
+             video_path=input_video, output_path=output, fps=fps, context_window=context
+         )
+
+         # If the user specified a different output FPS, recreate the video
+         if output_fps and output_fps != fps:
+             click.echo(
+                 click.style(f"Recreating video with custom output FPS: {output_fps}", fg="yellow")
+             )
+
+             # Get the cleaned frames
+             cleaned_frames = [
+                 os.path.join(processor.cleaned_dir, f)
+                 for f in sorted(os.listdir(processor.cleaned_dir))
+                 if f.endswith(".png")
+             ]
+
+             # Create a new output path
+             base_name = os.path.splitext(os.path.basename(input_video))[0]
+             custom_output = os.path.join(
+                 CONFIG["output_dir"], f"{base_name}_custom_fps_{output_fps}.mp4"
+             )
+
+             # Recreate with custom FPS
+             result = processor.frames_to_video(cleaned_frames, custom_output, output_fps)
+     else:
+         # Fall back to basic processing (EnhancedVideoProcessor is defined in this module)
+         processor = EnhancedVideoProcessor()
+         result = processor.remove_overlay_from_video(
+             video_path=input_video, output_path=output, fps=fps
+         )
+
+     click.echo(f"Video processed successfully: {result}")
+     click.echo(
+         click.style("📝 Note: Output video should maintain original playback speed", fg="green")
+     )
+
+
+ @click.group()
+ def main():
+     """Advanced video overlay removal tool with intelligent content reconstruction."""
+     pass
+
+
+ main.add_command(remove_overlay)
+
+ if __name__ == "__main__":
+     main()
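
A minimal sketch of exercising the click interface above from Python, using
click.testing.CliRunner; the input path is hypothetical, and the command name
"remove-overlay" assumes click's default underscore-to-dash conversion.

from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(main, ["remove-overlay", "capture.mp4", "--fps", "30", "--context", "3"])
print(result.output)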