pivtools 0.1.3 (pivtools-0.1.3-cp311-cp311-win_amd64.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pivtools-0.1.3.dist-info/METADATA +222 -0
- pivtools-0.1.3.dist-info/RECORD +127 -0
- pivtools-0.1.3.dist-info/WHEEL +5 -0
- pivtools-0.1.3.dist-info/entry_points.txt +3 -0
- pivtools-0.1.3.dist-info/top_level.txt +3 -0
- pivtools_cli/__init__.py +5 -0
- pivtools_cli/_build_marker.c +25 -0
- pivtools_cli/_build_marker.cp311-win_amd64.pyd +0 -0
- pivtools_cli/cli.py +225 -0
- pivtools_cli/example.py +139 -0
- pivtools_cli/lib/PIV_2d_cross_correlate.c +334 -0
- pivtools_cli/lib/PIV_2d_cross_correlate.h +22 -0
- pivtools_cli/lib/common.h +36 -0
- pivtools_cli/lib/interp2custom.c +146 -0
- pivtools_cli/lib/interp2custom.h +48 -0
- pivtools_cli/lib/peak_locate_gsl.c +711 -0
- pivtools_cli/lib/peak_locate_gsl.h +40 -0
- pivtools_cli/lib/peak_locate_gsl_print.c +736 -0
- pivtools_cli/lib/peak_locate_lm.c +751 -0
- pivtools_cli/lib/peak_locate_lm.h +27 -0
- pivtools_cli/lib/xcorr.c +342 -0
- pivtools_cli/lib/xcorr.h +31 -0
- pivtools_cli/lib/xcorr_cache.c +78 -0
- pivtools_cli/lib/xcorr_cache.h +26 -0
- pivtools_cli/piv/interp2custom/interp2custom.py +69 -0
- pivtools_cli/piv/piv.py +240 -0
- pivtools_cli/piv/piv_backend/base.py +825 -0
- pivtools_cli/piv/piv_backend/cpu_instantaneous.py +1005 -0
- pivtools_cli/piv/piv_backend/factory.py +28 -0
- pivtools_cli/piv/piv_backend/gpu_instantaneous.py +15 -0
- pivtools_cli/piv/piv_backend/infilling.py +445 -0
- pivtools_cli/piv/piv_backend/outlier_detection.py +306 -0
- pivtools_cli/piv/piv_backend/profile_cpu_instantaneous.py +230 -0
- pivtools_cli/piv/piv_result.py +40 -0
- pivtools_cli/piv/save_results.py +342 -0
- pivtools_cli/piv_cluster/cluster.py +108 -0
- pivtools_cli/preprocessing/filters.py +399 -0
- pivtools_cli/preprocessing/preprocess.py +79 -0
- pivtools_cli/tests/helpers.py +107 -0
- pivtools_cli/tests/instantaneous_piv/test_piv_integration.py +167 -0
- pivtools_cli/tests/instantaneous_piv/test_piv_integration_multi.py +553 -0
- pivtools_cli/tests/preprocessing/test_filters.py +41 -0
- pivtools_core/__init__.py +5 -0
- pivtools_core/config.py +703 -0
- pivtools_core/config.yaml +135 -0
- pivtools_core/image_handling/__init__.py +0 -0
- pivtools_core/image_handling/load_images.py +464 -0
- pivtools_core/image_handling/readers/__init__.py +53 -0
- pivtools_core/image_handling/readers/generic_readers.py +50 -0
- pivtools_core/image_handling/readers/lavision_reader.py +190 -0
- pivtools_core/image_handling/readers/registry.py +24 -0
- pivtools_core/paths.py +49 -0
- pivtools_core/vector_loading.py +248 -0
- pivtools_gui/__init__.py +3 -0
- pivtools_gui/app.py +687 -0
- pivtools_gui/calibration/__init__.py +0 -0
- pivtools_gui/calibration/app/__init__.py +0 -0
- pivtools_gui/calibration/app/views.py +1186 -0
- pivtools_gui/calibration/calibration_planar/planar_calibration_production.py +570 -0
- pivtools_gui/calibration/vector_calibration_production.py +544 -0
- pivtools_gui/config.py +703 -0
- pivtools_gui/image_handling/__init__.py +0 -0
- pivtools_gui/image_handling/load_images.py +464 -0
- pivtools_gui/image_handling/readers/__init__.py +53 -0
- pivtools_gui/image_handling/readers/generic_readers.py +50 -0
- pivtools_gui/image_handling/readers/lavision_reader.py +190 -0
- pivtools_gui/image_handling/readers/registry.py +24 -0
- pivtools_gui/masking/__init__.py +0 -0
- pivtools_gui/masking/app/__init__.py +0 -0
- pivtools_gui/masking/app/views.py +123 -0
- pivtools_gui/paths.py +49 -0
- pivtools_gui/piv_runner.py +261 -0
- pivtools_gui/pivtools.py +58 -0
- pivtools_gui/plotting/__init__.py +0 -0
- pivtools_gui/plotting/app/__init__.py +0 -0
- pivtools_gui/plotting/app/views.py +1671 -0
- pivtools_gui/plotting/plot_maker.py +220 -0
- pivtools_gui/post_processing/POD/__init__.py +0 -0
- pivtools_gui/post_processing/POD/app/__init__.py +0 -0
- pivtools_gui/post_processing/POD/app/views.py +647 -0
- pivtools_gui/post_processing/POD/pod_decompose.py +979 -0
- pivtools_gui/post_processing/POD/views.py +1096 -0
- pivtools_gui/post_processing/__init__.py +0 -0
- pivtools_gui/static/404.html +1 -0
- pivtools_gui/static/_next/static/chunks/117-d5793c8e79de5511.js +2 -0
- pivtools_gui/static/_next/static/chunks/484-cfa8b9348ce4f00e.js +1 -0
- pivtools_gui/static/_next/static/chunks/869-320a6b9bdafbb6d3.js +1 -0
- pivtools_gui/static/_next/static/chunks/app/_not-found/page-12f067ceb7415e55.js +1 -0
- pivtools_gui/static/_next/static/chunks/app/layout-b907d5f31ac82e9d.js +1 -0
- pivtools_gui/static/_next/static/chunks/app/page-334cc4e8444cde2f.js +1 -0
- pivtools_gui/static/_next/static/chunks/fd9d1056-ad15f396ddf9b7e5.js +1 -0
- pivtools_gui/static/_next/static/chunks/framework-f66176bb897dc684.js +1 -0
- pivtools_gui/static/_next/static/chunks/main-a1b3ced4d5f6d998.js +1 -0
- pivtools_gui/static/_next/static/chunks/main-app-8a63c6f5e7baee11.js +1 -0
- pivtools_gui/static/_next/static/chunks/pages/_app-72b849fbd24ac258.js +1 -0
- pivtools_gui/static/_next/static/chunks/pages/_error-7ba65e1336b92748.js +1 -0
- pivtools_gui/static/_next/static/chunks/polyfills-42372ed130431b0a.js +1 -0
- pivtools_gui/static/_next/static/chunks/webpack-4a8ca7c99e9bb3d8.js +1 -0
- pivtools_gui/static/_next/static/css/7d3f2337d7ea12a5.css +3 -0
- pivtools_gui/static/_next/static/vQeR20OUdSSKlK4vukC4q/_buildManifest.js +1 -0
- pivtools_gui/static/_next/static/vQeR20OUdSSKlK4vukC4q/_ssgManifest.js +1 -0
- pivtools_gui/static/file.svg +1 -0
- pivtools_gui/static/globe.svg +1 -0
- pivtools_gui/static/grid.svg +8 -0
- pivtools_gui/static/index.html +1 -0
- pivtools_gui/static/index.txt +8 -0
- pivtools_gui/static/next.svg +1 -0
- pivtools_gui/static/vercel.svg +1 -0
- pivtools_gui/static/window.svg +1 -0
- pivtools_gui/stereo_reconstruction/__init__.py +0 -0
- pivtools_gui/stereo_reconstruction/app/__init__.py +0 -0
- pivtools_gui/stereo_reconstruction/app/views.py +1985 -0
- pivtools_gui/stereo_reconstruction/stereo_calibration_production.py +606 -0
- pivtools_gui/stereo_reconstruction/stereo_reconstruction_production.py +544 -0
- pivtools_gui/utils.py +63 -0
- pivtools_gui/vector_loading.py +248 -0
- pivtools_gui/vector_merging/__init__.py +1 -0
- pivtools_gui/vector_merging/app/__init__.py +1 -0
- pivtools_gui/vector_merging/app/views.py +759 -0
- pivtools_gui/vector_statistics/app/__init__.py +1 -0
- pivtools_gui/vector_statistics/app/views.py +710 -0
- pivtools_gui/vector_statistics/ensemble_statistics.py +49 -0
- pivtools_gui/vector_statistics/instantaneous_statistics.py +311 -0
- pivtools_gui/video_maker/__init__.py +0 -0
- pivtools_gui/video_maker/app/__init__.py +0 -0
- pivtools_gui/video_maker/app/views.py +436 -0
- pivtools_gui/video_maker/video_maker.py +662 -0
pivtools_gui/video_maker/app/views.py
@@ -0,0 +1,436 @@
+import os
+import threading
+from datetime import datetime
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+from flask import Blueprint, jsonify, request, send_file
+from loguru import logger
+
+from ...config import get_config
+from ...paths import get_data_paths
+from ..video_maker import PlotSettings, make_video_from_scalar
+
+video_maker_bp = Blueprint("video_maker", __name__, url_prefix="/video")
+
+# Constants
+VIDEO_EXTENSIONS = (".mp4", ".avi", ".mov", ".mkv")
+MAX_DEPTH = 5  # For deep search
+
+# In-memory video job state with thread-safety
+_video_state: Dict[str, Any] = {
+    "processing": False,
+    "progress": 0,
+    "message": None,
+    "started_at": None,
+    "finished_at": None,
+    "error": None,
+    "meta": None,
+    "out_path": None,
+    "current_frame": 0,
+    "total_frames": 0,
+}
+_video_thread: Optional[threading.Thread] = None
+_video_cancel_event = threading.Event()
+_video_state_lock = threading.RLock()  # Reentrant lock for safety
+
+
+def _video_set_state(**kwargs):
+    with _video_state_lock:
+        _video_state.update(kwargs)
+
+
+def _video_reset_state():
+    with _video_state_lock:
+        _video_state.update(
+            {
+                "processing": False,
+                "progress": 0,
+                "message": None,
+                "started_at": None,
+                "finished_at": None,
+                "error": None,
+                "meta": None,
+                "out_path": None,
+                "current_frame": 0,
+                "total_frames": 0,
+            }
+        )
+
+
+def progress_callback(current_frame: int, total_frames: int, message: str = ""):
+    """Thread-safe progress update."""
+    _video_set_state(
+        progress=int((current_frame / max(total_frames, 1)) * 100),
+        current_frame=current_frame,
+        total_frames=total_frames,
+        message=f"Processing frame {current_frame}/{total_frames}"
+        + (f" - {message}" if message else ""),
+    )
+
+
+def _run_video_job(
+    base: Path,
+    cam: int,
+    num_images: int,  # Number of images/files in the folder
+    run: int,  # Run number (1-based) for run_index
+    source_type: str,
+    endpoint: str,
+    merged_flag: bool,
+    var: str,
+    pattern: str,
+    ps: PlotSettings,
+    test_mode: bool = False,
+    test_frames: int = 50,
+):
+    """Optimized job with better error handling."""
+    try:
+        _video_set_state(
+            processing=True,
+            progress=0,
+            started_at=datetime.utcnow().isoformat(),
+            message="Initializing video creation",
+            error=None,
+            meta=None,
+            current_frame=0,
+        )
+
+        logger.info(
+            f"[VIDEO] Starting video job | base='{base}', cam={cam}, num_images={num_images}, run={run}, var={var}, test_mode={test_mode}"
+        )
+
+        paths = get_data_paths(
+            base, num_images, cam, source_type, endpoint, merged_flag
+        )
+
+        data_dir = Path(paths.get("data_dir"))
+        video_dir = Path(paths.get("video_dir"))
+
+        video_dir.mkdir(parents=True, exist_ok=True)
+
+        if not Path(ps.out_path).is_absolute():
+            ps.out_path = str(video_dir / ps.out_path)
+
+        ps.progress_callback = progress_callback
+        ps.test_mode = test_mode
+        ps.test_frames = test_frames if test_mode else None
+
+        _video_set_state(message="Starting video generation...")
+
+        meta = make_video_from_scalar(
+            data_dir,
+            var=var,
+            pattern=pattern,
+            settings=ps,
+            cancel_event=_video_cancel_event,
+            run_index=run - 1,  # Convert run (1-based) to run_index (0-based)
+        )
+
+        if _video_cancel_event.is_set():
+            _video_set_state(
+                processing=False,
+                progress=0,
+                message="Video creation was cancelled",
+                finished_at=datetime.utcnow().isoformat(),
+                error="Cancelled by user",
+            )
+            return
+
+        _video_set_state(
+            progress=100,
+            message="Video completed successfully",
+            processing=False,
+            finished_at=datetime.utcnow().isoformat(),
+            meta=meta,
+            out_path=ps.out_path,
+            computed_limits={
+                "lower": meta.get("vmin"),
+                "upper": meta.get("vmax"),
+                "actual_min": meta.get("actual_min"),
+                "actual_max": meta.get("actual_max"),
+                "percentile_based": ps.lower_limit is None or ps.upper_limit is None,
+            },
+        )
+        logger.info(f"[VIDEO] Job completed successfully. Output: {ps.out_path}")
+
+    except Exception as e:
+        logger.exception(f"[VIDEO] Job failed: {e}")
+        _video_set_state(
+            processing=False,
+            error=str(e),
+            message=f"Video creation failed: {str(e)}",
+            finished_at=datetime.utcnow().isoformat(),
+        )
+
+
+@video_maker_bp.route("/list_videos", methods=["GET"])
+def list_videos():
+    """Optimized video listing with glob and caching."""
+    try:
+        base_path_str = request.args.get("base_path")
+        cfg = get_config(refresh=True)
+
+        base = Path(base_path_str).expanduser() if base_path_str else cfg.base_paths[0]
+
+        logger.info(f"[VIDEO] Listing videos under base path: {base}")
+
+        videos: List[str] = []
+
+        videos_dir = base / "videos"
+        if videos_dir.exists():
+            for ext in VIDEO_EXTENSIONS:
+                videos.extend([str(f) for f in videos_dir.glob(f"**/*{ext}")])
+
+        cam_dirs = [d for d in base.glob("**/Cam*") if d.is_dir()]
+        for cam_dir in cam_dirs:
+            for video_subdir in ["videos", "merged/videos"]:
+                video_dir = cam_dir / video_subdir
+                if video_dir.exists():
+                    for ext in VIDEO_EXTENSIONS:
+                        videos.extend([str(f) for f in video_dir.glob(f"*{ext}")])
+
+        if not videos:
+
+            def find_videos(directory: Path, current_depth: int = 0) -> List[str]:
+                if current_depth > MAX_DEPTH:
+                    return []
+                found = []
+                try:
+                    for item in directory.iterdir():
+                        if item.is_file() and item.suffix.lower() in VIDEO_EXTENSIONS:
+                            found.append(str(item))
+                        elif item.is_dir():
+                            found.extend(find_videos(item, current_depth + 1))
+                except (PermissionError, OSError):
+                    pass
+                return found
+
+            videos = find_videos(base)
+
+        videos.sort(
+            key=lambda x: os.path.getmtime(x) if os.path.exists(x) else 0, reverse=True
+        )
+
+        logger.info(f"[VIDEO] Found {len(videos)} videos")
+        return jsonify({"videos": videos})
+    except Exception as e:
+        logger.exception(f"[VIDEO] Failed to list videos: {e}")
+        return jsonify({"error": str(e), "videos": []}), 500
+
+
+@video_maker_bp.route("/start_video", methods=["POST"])
+def start_video():
+    """
+    Start video job with validation.
+
+    Expected JSON parameters:
+    - base_path: str - Base directory path for data
+    - camera: int - Camera number (1-based)
+    - run: int - Run number (1-based)
+    - var: str - Variable to visualize ("ux", "uy", "mag")
+    - fps: int (optional) - Video frame rate (1-120, default: 30)
+    - test_mode: bool (optional) - Create test video with limited frames
+    - test_frames: int (optional) - Number of frames for test mode (default: 50)
+    - lower/upper: float (optional) - Custom color scale limits
+    - cmap: str (optional) - Matplotlib colormap name
+    - resolution: str (optional) - Video resolution ("4k" or default)
+    - out_name: str (optional) - Custom output filename
+    """
+    global _video_thread
+
+    data = request.get_json(silent=True) or {}
+    cfg = get_config(refresh=True)
+
+    # Validate inputs
+    base_path_str = data.get("base_path")
+    if not base_path_str:
+        return jsonify({"error": "base_path is required"}), 400
+    base = Path(base_path_str).expanduser()
+    if not base.exists():
+        return jsonify({"error": "Invalid base_path"}), 400
+
+    cam_raw = data.get("camera")
+    if cam_raw is None:
+        return jsonify({"error": "camera is required"}), 400
+    try:
+        cam = int(cam_raw)
+        if cam < 1:
+            raise ValueError
+    except ValueError:
+        return jsonify({"error": "Invalid camera number"}), 400
+
+    test_mode = data.get("test_mode", False)
+    if not isinstance(test_mode, bool):
+        return jsonify({"error": "test_mode must be boolean"}), 400
+    test_frames = int(data.get("test_frames", 50))
+    if test_frames < 1:
+        return jsonify({"error": "test_frames must be positive"}), 400
+
+    # Parse run as the run number (1-based)
+    run_raw = data.get("run")
+    if run_raw is None:
+        return jsonify({"error": "run is required"}), 400
+    try:
+        run = int(run_raw)
+        if run < 1:
+            raise ValueError
+    except ValueError:
+        return jsonify({"error": "Invalid run number"}), 400
+
+    num_images = int(data.get("num_images", 1))  # Keep for other uses, e.g., if needed elsewhere
+    if num_images < 1:
+        return jsonify({"error": "num_images must be positive"}), 400
+    merged_flag = str(data.get("merged", "0")) in ("1", "true", "True")
+    endpoint = data.get("endpoint", "") or ""
+    source_type = data.get("type", "instantaneous") or "instantaneous"
+    if source_type not in ["instantaneous", "ensemble"]:  # Add allowed types
+        return jsonify({"error": "Invalid source_type"}), 400
+
+    var = data.get("var", None) or data.get("var", "uy")
+    if var not in ("ux", "uy", "mag"):
+        return jsonify({"error": "Invalid var"}), 400
+    pattern = data.get("pattern", "[0-9]*.mat")
+
+    ps = PlotSettings()
+
+    # Parse FPS with validation (frames per second for video output)
+    fps = data.get("fps", 30)  # Default to 30 FPS if not provided
+    try:
+        fps = int(fps)
+        if fps < 1 or fps > 120:  # Reasonable range: 1-120 FPS
+            return jsonify({"error": "FPS must be between 1 and 120"}), 400
+        ps.fps = fps
+    except (ValueError, TypeError):
+        return jsonify({"error": "Invalid FPS value"}), 400
+
+    ps.crf = 18
+    ps.upscale = (1080, 1920) if data.get("resolution") != "4k" else (2160, 3840)
+    ps.out_path = data.get(
+        "out_name",
+        f"run{run}_Cam{cam}_{var}{'_test' if test_mode else ''}.mp4",  # Use run for filename
+    )
+
+    try:
+        lower = data.get("lower")
+        upper = data.get("upper")
+        ps.lower_limit = float(lower) if lower and str(lower).strip() else None
+        ps.upper_limit = float(upper) if upper and str(upper).strip() else None
+    except ValueError:
+        return jsonify({"error": "Invalid lower/upper limits"}), 400
+
+    cmap = data.get("cmap")
+    if cmap and cmap != "default":
+        ps.cmap = cmap
+
+    with _video_state_lock:
+        running = _video_thread is not None and _video_thread.is_alive()
+    if running:
+        with _video_state_lock:
+            st = {k: _video_state.get(k) for k in ("processing", "progress", "message")}
+        return jsonify({"status": "busy", **st}), 409
+
+    _video_cancel_event.clear()
+    _video_reset_state()
+    _video_set_state(message="Video queued")
+
+    _video_thread = threading.Thread(
+        target=_run_video_job,
+        args=(
+            base,
+            cam,
+            num_images,  # Pass num_images for folder selection
+            run,  # Pass run for run_index
+            source_type,
+            endpoint,
+            merged_flag,
+            var,
+            pattern,
+            ps,
+            test_mode,
+            test_frames,
+        ),
+        daemon=True,
+    )
+    _video_thread.start()
+
+    return jsonify({"status": "started", "processing": True, "progress": 0}), 202
+
+
+@video_maker_bp.route("/cancel_video", methods=["POST"])
+def cancel_video():
+    """Cancel video job safely."""
+    _video_cancel_event.set()
+    with _video_state_lock:
+        is_running = bool(_video_thread is not None and _video_thread.is_alive())
+    if is_running:
+        _video_set_state(message="Cancellation requested")
+        return jsonify({"status": "cancelling", "processing": True}), 202
+    _video_reset_state()
+    return jsonify({"status": "idle", "processing": False}), 200
+
+
+@video_maker_bp.route("/video_status", methods=["GET"])
+def video_status():
+    """Return thread-safe status."""
+    with _video_state_lock:
+        st = dict(_video_state)
+        st["processing"] = bool(
+            st.get("processing", False)
+            or (_video_thread is not None and _video_thread.is_alive())
+        )
+        st["progress"] = int(max(0, min(100, int(st.get("progress", 0)))))
+        if st.get("out_path"):
+            st["out_path"] = st["out_path"]
+        elif st.get("meta") and isinstance(st["meta"], dict) and "out_path" in st["meta"]:
+            st["out_path"] = st["meta"]["out_path"]
+        if st.get("computed_limits"):
+            st["computed_limits"] = st["computed_limits"]
+        return jsonify(st), 200
+
+
+@video_maker_bp.route("/download", methods=["GET"])
+def download_video():
+    """Stream video file with range support."""
+    try:
+        abs_path = Path(request.args.get("path", "")).resolve()
+        if not abs_path.is_file() or abs_path.suffix.lower() not in VIDEO_EXTENSIONS:
+            return jsonify({"error": "Invalid file"}), 400
+        user_home = Path.home()
+        cwd = Path.cwd()
+
+        # Get configured base paths for data access
+        cfg = get_config(refresh=True)
+        config_base_paths = [Path(bp).resolve() for bp in cfg.base_paths if Path(bp).exists()]
+
+        allowed_roots = [
+            user_home,
+            cwd,
+            Path("/tmp"),
+            Path("/var/tmp"),
+            Path("/Users"),
+            Path("/home"),
+        ]
+
+        # Add configured base paths to allowed roots
+        allowed_roots.extend(config_base_paths)
+
+        if os.name == "nt":
+            allowed_roots.extend([Path("C:\\Users"), Path("C:\\temp"), Path("C:\\tmp")])
+        path_allowed = any(
+            allowed_root in abs_path.parents or abs_path == allowed_root
+            for allowed_root in allowed_roots
+        )
+        if not path_allowed:
+            logger.warning(f"Attempted download of disallowed path: {abs_path}")
+            logger.debug(f"Allowed roots: {allowed_roots}")
+            logger.debug(f"File parents: {list(abs_path.parents)}")
+            return jsonify({"error": "File not allowed"}), 403
+        response = send_file(
+            str(abs_path), mimetype="video/mp4", conditional=True, as_attachment=True
+        )
+        response.headers.add("Access-Control-Allow-Origin", "*")
+        response.headers.add("Access-Control-Allow-Headers", "Range")
+        return response
+    except Exception as e:
+        logger.error(f"Error serving video file: {e}")
+        return jsonify({"error": f"Error serving file: {str(e)}"}), 500