dji_telemetry-1.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dji_telemetry/parser.py ADDED
@@ -0,0 +1,267 @@
+ """
+ DJI SRT telemetry file parser.
+ """
+
+ import re
+ import math
+ from dataclasses import dataclass, field
+ from pathlib import Path
+ from typing import Optional
+
+
+ @dataclass
+ class TelemetryFrame:
+     """Telemetry data for a single frame."""
+     frame_num: int
+     start_time_ms: float
+     end_time_ms: float
+     timestamp: str
+     iso: int
+     shutter: str
+     fnum: float
+     ev: float
+     ct: int  # color temperature
+     latitude: float
+     longitude: float
+     rel_alt: float  # relative altitude
+     abs_alt: float  # absolute altitude
+     # Calculated fields
+     h_speed: float = 0.0  # horizontal speed (m/s)
+     v_speed: float = 0.0  # vertical speed (m/s)
+     distance: float = 0.0  # cumulative distance traveled (m)
+
+     def to_dict(self) -> dict:
+         """Convert to dictionary."""
+         return {
+             'frame_num': self.frame_num,
+             'start_time_ms': self.start_time_ms,
+             'end_time_ms': self.end_time_ms,
+             'timestamp': self.timestamp,
+             'iso': self.iso,
+             'shutter': self.shutter,
+             'fnum': self.fnum,
+             'ev': self.ev,
+             'color_temp': self.ct,
+             'latitude': self.latitude,
+             'longitude': self.longitude,
+             'rel_altitude_m': self.rel_alt,
+             'abs_altitude_m': self.abs_alt,
+             'h_speed_ms': self.h_speed,
+             'v_speed_ms': self.v_speed,
+             'h_speed_kmh': self.h_speed * 3.6,
+             'distance_m': self.distance,
+         }
+
+
+ @dataclass
+ class TelemetryData:
+     """Container for all telemetry data from an SRT file."""
+     frames: list[TelemetryFrame] = field(default_factory=list)
+     source_file: Optional[str] = None
+
+     @property
+     def duration_seconds(self) -> float:
+         """Total duration in seconds."""
+         if not self.frames:
+             return 0.0
+         return self.frames[-1].end_time_ms / 1000.0
+
+     @property
+     def total_distance(self) -> float:
+         """Total distance traveled in meters."""
+         if not self.frames:
+             return 0.0
+         return self.frames[-1].distance
+
+     @property
+     def max_altitude(self) -> float:
+         """Maximum relative altitude in meters."""
+         if not self.frames:
+             return 0.0
+         return max(f.rel_alt for f in self.frames)
+
+     @property
+     def max_speed(self) -> float:
+         """Maximum horizontal speed in m/s."""
+         if not self.frames:
+             return 0.0
+         return max(f.h_speed for f in self.frames)
+
+     @property
+     def start_coordinates(self) -> tuple[float, float]:
+         """Starting coordinates (lat, lon)."""
+         if not self.frames:
+             return (0.0, 0.0)
+         return (self.frames[0].latitude, self.frames[0].longitude)
+
+     @property
+     def end_coordinates(self) -> tuple[float, float]:
+         """Ending coordinates (lat, lon)."""
+         if not self.frames:
+             return (0.0, 0.0)
+         return (self.frames[-1].latitude, self.frames[-1].longitude)
+
+     def get_frame_at_time(self, time_ms: float) -> Optional[TelemetryFrame]:
+         """Find the telemetry frame for a given video time."""
+         for frame in self.frames:
+             if frame.start_time_ms <= time_ms < frame.end_time_ms:
+                 return frame
+
+         if self.frames:
+             if time_ms < self.frames[0].start_time_ms:
+                 return self.frames[0]
+             if time_ms >= self.frames[-1].end_time_ms:
+                 return self.frames[-1]
+
+         return None
+
+     def to_list(self) -> list[dict]:
+         """Convert all frames to list of dictionaries."""
+         return [f.to_dict() for f in self.frames]
+
+
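# A quick way to get the parsed frames into a spreadsheet is to pair
# TelemetryData.to_list() with csv.DictWriter. Illustrative sketch only, assuming
# the package installs as dji_telemetry; file names are placeholders:
#
#     import csv
#     from dji_telemetry.parser import parse_srt
#
#     data = parse_srt("DJI_0001.SRT")
#     rows = data.to_list()
#     with open("telemetry.csv", "w", newline="") as fh:
#         writer = csv.DictWriter(fh, fieldnames=list(rows[0].keys()))
#         writer.writeheader()
#         writer.writerows(rows)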
+ def _parse_time_to_ms(time_str: str) -> float:
+     """Convert SRT time format (HH:MM:SS,mmm) to milliseconds."""
+     match = re.match(r"(\d+):(\d+):(\d+),(\d+)", time_str)
+     if match:
+         h, m, s, ms = map(int, match.groups())
+         return (h * 3600 + m * 60 + s) * 1000 + ms
+     return 0.0
+
+
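# Worked example (illustrative, not part of the module): "00:01:23,456" parses
# to h=0, m=1, s=23, ms=456, so the result is
# (0 * 3600 + 1 * 60 + 23) * 1000 + 456 = 83456 ms.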
+ def _haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
+     """Calculate distance between two GPS coordinates in meters."""
+     R = 6371000  # Earth's radius in meters
+
+     phi1 = math.radians(lat1)
+     phi2 = math.radians(lat2)
+     delta_phi = math.radians(lat2 - lat1)
+     delta_lambda = math.radians(lon2 - lon1)
+
+     a = math.sin(delta_phi / 2) ** 2 + \
+         math.cos(phi1) * math.cos(phi2) * math.sin(delta_lambda / 2) ** 2
+     c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
+
+     return R * c
+
+
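# Sanity check for the haversine math (illustrative, not part of the module):
# a 0.001 degree change in latitude at constant longitude is about 111 m on a
# sphere of radius 6,371 km, and the function reproduces that:
#
#     d = _haversine_distance(47.6062, -122.3321, 47.6072, -122.3321)
#     print(round(d, 1))  # approximately 111.2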
+ def parse_srt(srt_path: str | Path, smooth_speeds: bool = True, window_size: int = 15) -> TelemetryData:
+     """
+     Parse a DJI SRT file and extract telemetry data.
+
+     Args:
+         srt_path: Path to the SRT file
+         smooth_speeds: Apply moving average smoothing to speed calculations
+         window_size: Window size for speed smoothing
+
+     Returns:
+         TelemetryData object containing all frames
+     """
+     srt_path = Path(srt_path)
+     frames = []
+
+     with open(srt_path, 'r', encoding='utf-8') as f:
+         content = f.read()
+
+     # Split into subtitle blocks
+     blocks = re.split(r'\n\n+', content.strip())
+
+     for block in blocks:
+         lines = block.strip().split('\n')
+         if len(lines) < 3:
+             continue
+
+         # Parse frame number
+         try:
+             frame_num = int(lines[0])
+         except ValueError:
+             continue
+
+         # Parse time range
+         time_match = re.match(r"(\d+:\d+:\d+,\d+)\s*-->\s*(\d+:\d+:\d+,\d+)", lines[1])
+         if not time_match:
+             continue
+
+         start_time = _parse_time_to_ms(time_match.group(1))
+         end_time = _parse_time_to_ms(time_match.group(2))
+
+         # Join remaining lines for metadata
+         metadata_text = ' '.join(lines[2:])
+
+         # Remove font tags
+         metadata_text = re.sub(r'<[^>]+>', '', metadata_text)
+
+         # Extract timestamp
+         timestamp_match = re.search(r'(\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2}\.\d+)', metadata_text)
+         timestamp = timestamp_match.group(1) if timestamp_match else ""
+
+         # Extract values using regex
+         def extract_value(pattern: str, default=0):
+             match = re.search(pattern, metadata_text)
+             if match:
+                 try:
+                     return float(match.group(1))
+                 except ValueError:
+                     return default
+             return default
+
+         def extract_str(pattern: str, default=""):
+             match = re.search(pattern, metadata_text)
+             return match.group(1) if match else default
+
+         frame = TelemetryFrame(
+             frame_num=frame_num,
+             start_time_ms=start_time,
+             end_time_ms=end_time,
+             timestamp=timestamp,
+             iso=int(extract_value(r'\[iso:\s*(\d+)\]')),
+             shutter=extract_str(r'\[shutter:\s*([^\]]+)\]'),
+             fnum=extract_value(r'\[fnum:\s*([\d.]+)\]'),
+             ev=extract_value(r'\[ev:\s*([+-]?[\d.]+)\]'),
+             ct=int(extract_value(r'\[ct:\s*(\d+)\]')),
+             latitude=extract_value(r'\[latitude:\s*([+-]?[\d.]+)\]'),
+             longitude=extract_value(r'\[longitude:\s*([+-]?[\d.]+)\]'),
+             rel_alt=extract_value(r'\[rel_alt:\s*([+-]?[\d.]+)'),
+             abs_alt=extract_value(r'\[abs_alt:\s*([+-]?[\d.]+)'),
+         )
+
+         frames.append(frame)
+
+     # Sort by frame number
+     frames.sort(key=lambda f: f.frame_num)
+
+     # Calculate speeds and cumulative distance
+     cumulative_distance = 0.0
+     for i in range(1, len(frames)):
+         prev = frames[i - 1]
+         curr = frames[i]
+
+         # Time delta in seconds
+         dt = (curr.start_time_ms - prev.start_time_ms) / 1000.0
+         if dt <= 0:
+             dt = 0.033  # ~30fps fallback
+
+         # Horizontal distance from GPS coordinates
+         h_dist = _haversine_distance(prev.latitude, prev.longitude,
+                                      curr.latitude, curr.longitude)
+         curr.h_speed = h_dist / dt
+         cumulative_distance += h_dist
+         curr.distance = cumulative_distance
+
+         # Vertical speed from altitude change
+         v_dist = curr.rel_alt - prev.rel_alt
+         curr.v_speed = v_dist / dt
+
+     # Apply speed smoothing
+     if smooth_speeds and len(frames) > window_size:
+         h_speeds = [f.h_speed for f in frames]
+         v_speeds = [f.v_speed for f in frames]
+
+         for i in range(len(frames)):
+             start_idx = max(0, i - window_size // 2)
+             end_idx = min(len(frames), i + window_size // 2 + 1)
+
+             frames[i].h_speed = sum(h_speeds[start_idx:end_idx]) / (end_idx - start_idx)
+             frames[i].v_speed = sum(v_speeds[start_idx:end_idx]) / (end_idx - start_idx)
+
+     return TelemetryData(frames=frames, source_file=str(srt_path))
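A minimal end-to-end sketch of the parser API above, assuming the package installs as dji_telemetry with this module at dji_telemetry.parser (file names are placeholders):

    from dji_telemetry.parser import parse_srt

    data = parse_srt("DJI_0001.SRT")
    print(f"Duration: {data.duration_seconds:.1f} s")
    print(f"Max altitude: {data.max_altitude:.1f} m")
    print(f"Max speed: {data.max_speed * 3.6:.1f} km/h")
    print(f"Distance: {data.total_distance:.0f} m")
    frame = data.get_frame_at_time(10_000)  # telemetry at the 10-second mark
    if frame:
        print(frame.latitude, frame.longitude, frame.rel_alt)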
dji_telemetry/video.py ADDED
@@ -0,0 +1,299 @@
+ """
+ Video processing for telemetry overlay.
+ """
+
+ import subprocess
+ from pathlib import Path
+ from typing import Callable, Optional
+
+ import cv2
+ import numpy as np
+
+ from .parser import TelemetryData
+ from .overlay import OverlayConfig, OverlayRenderer
+
+
+ def process_video(
+     video_path: str | Path,
+     telemetry: TelemetryData,
+     output_path: str | Path,
+     config: Optional[OverlayConfig] = None,
+     progress_callback: Optional[Callable[[int, int], None]] = None
+ ) -> Path:
+     """
+     Process a video file and add telemetry overlay.
+
+     Args:
+         video_path: Path to input video file
+         telemetry: TelemetryData object with telemetry frames
+         output_path: Path to output video file
+         config: Overlay configuration (uses defaults if None)
+         progress_callback: Optional callback function(current_frame, total_frames)
+
+     Returns:
+         Path to the output video file
+     """
+     video_path = Path(video_path)
+     output_path = Path(output_path)
+
+     cap = cv2.VideoCapture(str(video_path))
+     if not cap.isOpened():
+         raise IOError(f"Could not open video file: {video_path}")
+
+     # Get video properties
+     width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+     height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+     fps = cap.get(cv2.CAP_PROP_FPS)
+     total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+     if fps <= 0:
+         fps = 30.0  # some containers report 0 fps; avoid division by zero below
+
+     # Create overlay renderer
+     renderer = OverlayRenderer(width, height, config)
+
+     # Create video writer
+     fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+     out = cv2.VideoWriter(str(output_path), fourcc, fps, (width, height))
+
+     if not out.isOpened():
+         cap.release()
+         raise IOError(f"Could not create output video: {output_path}")
+
+     frame_num = 0
+     while True:
+         ret, video_frame = cap.read()
+         if not ret:
+             break
+
+         # Calculate current time in ms
+         current_time_ms = (frame_num / fps) * 1000
+
+         # Get telemetry for current time
+         telem_frame = telemetry.get_frame_at_time(current_time_ms)
+
+         if telem_frame:
+             video_frame = renderer.render(telem_frame, video_frame)
+
+         out.write(video_frame)
+         frame_num += 1
+
+         if progress_callback:
+             progress_callback(frame_num, total_frames)
+
+     cap.release()
+     out.release()
+
+     return output_path
+
+
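# Typical call (illustrative sketch; paths are placeholders and the telemetry is
# assumed to come from parse_srt in the sibling parser module). The callback
# receives (current_frame, total_frames), so a one-line progress report works:
#
#     from dji_telemetry.parser import parse_srt
#     from dji_telemetry.video import process_video
#
#     telemetry = parse_srt("DJI_0001.SRT")
#     process_video(
#         "DJI_0001.MP4",
#         telemetry,
#         "DJI_0001_overlay.mp4",
#         progress_callback=lambda done, total: print(f"{done}/{total}", end="\r"),
#     )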
+ def generate_overlay_video(
+     telemetry: TelemetryData,
+     output_path: str | Path,
+     width: int = 1920,
+     height: int = 1080,
+     fps: float = 30.0,
+     config: Optional[OverlayConfig] = None,
+     progress_callback: Optional[Callable[[int, int], None]] = None
+ ) -> Path:
+     """
+     Generate an overlay video containing only the telemetry graphics.
+
+     Args:
+         telemetry: TelemetryData object with telemetry frames
+         output_path: Path to output video file (.mov, .webm, or .mp4; the extension selects the codec)
+         width: Video width in pixels
+         height: Video height in pixels
+         fps: Frames per second
+         config: Overlay configuration (uses defaults if None)
+         progress_callback: Optional callback function(current_frame, total_frames)
+
+     Returns:
+         Path to the output video file
+
+     Note:
+         OpenCV's VideoWriter cannot write an alpha channel, so the graphics are
+         composited over a black background regardless of container. For a truly
+         transparent overlay, use generate_overlay_frames() and assemble the PNG
+         sequence with an alpha-capable encoder.
+     """
+     output_path = Path(output_path)
+
+     # Create overlay renderer
+     renderer = OverlayRenderer(width, height, config)
+
+     # Calculate total frames from telemetry duration
+     duration_ms = telemetry.duration_seconds * 1000
+     total_frames = int((duration_ms / 1000.0) * fps)
+
+     # Determine output format based on extension
+     ext = output_path.suffix.lower()
+
+     if ext == '.mov':
+         # Use PNG codec for MOV
+         fourcc = cv2.VideoWriter_fourcc(*'png ')
+         out = cv2.VideoWriter(str(output_path), fourcc, fps, (width, height), isColor=True)
+         use_alpha = False  # OpenCV VideoWriter doesn't support alpha directly
+     elif ext == '.webm':
+         # VP9 codec
+         fourcc = cv2.VideoWriter_fourcc(*'VP90')
+         out = cv2.VideoWriter(str(output_path), fourcc, fps, (width, height))
+         use_alpha = False
+     else:
+         # Default to mp4v
+         fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+         out = cv2.VideoWriter(str(output_path), fourcc, fps, (width, height))
+         use_alpha = False
+
+     if not out.isOpened():
+         raise IOError(f"Could not create output video: {output_path}")
+
+     for frame_num in range(total_frames):
+         current_time_ms = (frame_num / fps) * 1000
+         telem_frame = telemetry.get_frame_at_time(current_time_ms)
+
+         if telem_frame:
+             # Render the overlay on a transparent (BGRA) canvas
+             overlay = renderer.render(telem_frame, None)
+
+             if use_alpha:
+                 out.write(overlay)
+             else:
+                 # Convert BGRA to BGR with black background
+                 bgr_frame = cv2.cvtColor(overlay, cv2.COLOR_BGRA2BGR)
+                 out.write(bgr_frame)
+         else:
+             # Write black frame
+             black_frame = np.zeros((height, width, 3), dtype=np.uint8)
+             out.write(black_frame)
+
+         if progress_callback:
+             progress_callback(frame_num + 1, total_frames)
+
+     out.release()
+
+     return output_path
+
+
+ def generate_overlay_frames(
+     telemetry: TelemetryData,
+     output_dir: str | Path,
+     width: int = 1920,
+     height: int = 1080,
+     fps: float = 30.0,
+     config: Optional[OverlayConfig] = None,
+     format: str = 'png',
+     progress_callback: Optional[Callable[[int, int], None]] = None
+ ) -> Path:
+     """
+     Generate transparent overlay frames as individual images.
+
+     This is useful for compositing in video editors that support image sequences
+     with alpha channels.
+
+     Args:
+         telemetry: TelemetryData object with telemetry frames
+         output_dir: Directory to save frame images
+         width: Frame width in pixels
+         height: Frame height in pixels
+         fps: Frames per second (determines number of frames)
+         config: Overlay configuration (uses defaults if None)
+         format: Image format ('png' recommended for transparency)
+         progress_callback: Optional callback function(current_frame, total_frames)
+
+     Returns:
+         Path to the output directory
+     """
+     output_dir = Path(output_dir)
+     output_dir.mkdir(parents=True, exist_ok=True)
+
+     renderer = OverlayRenderer(width, height, config)
+
+     duration_ms = telemetry.duration_seconds * 1000
+     total_frames = int((duration_ms / 1000.0) * fps)
+
+     for frame_num in range(total_frames):
+         current_time_ms = (frame_num / fps) * 1000
+         telem_frame = telemetry.get_frame_at_time(current_time_ms)
+
+         if telem_frame:
+             overlay = renderer.render(telem_frame, None)
+         else:
+             overlay = np.zeros((height, width, 4), dtype=np.uint8)
+
+         frame_path = output_dir / f"frame_{frame_num:06d}.{format}"
+         cv2.imwrite(str(frame_path), overlay)
+
+         if progress_callback:
+             progress_callback(frame_num + 1, total_frames)
+
+     return output_dir
+
+
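# One way to turn the PNG sequence into an alpha-capable video is ffmpeg with
# ProRes 4444, mirroring how add_audio() below shells out to ffmpeg. Illustrative
# sketch only: the frame pattern matches the names written above, the directory
# name is a placeholder, and the ffmpeg flags are standard but not part of this
# package:
#
#     import subprocess
#
#     subprocess.run([
#         'ffmpeg', '-y',
#         '-framerate', '30',
#         '-i', 'overlay_frames/frame_%06d.png',
#         '-c:v', 'prores_ks', '-profile:v', '4444',
#         '-pix_fmt', 'yuva444p10le',
#         'overlay.mov',
#     ], check=True)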
+ def add_audio(
+     video_path: str | Path,
+     audio_source: str | Path,
+     output_path: str | Path,
+     ffmpeg_path: str = 'ffmpeg'
+ ) -> Path:
+     """
+     Add audio from source file to video using ffmpeg.
+
+     Args:
+         video_path: Path to video file (without audio)
+         audio_source: Path to file containing audio track
+         output_path: Path to output video file
+         ffmpeg_path: Path to ffmpeg executable
+
+     Returns:
+         Path to the output video file
+     """
+     video_path = Path(video_path)
+     audio_source = Path(audio_source)
+     output_path = Path(output_path)
+
+     cmd = [
+         ffmpeg_path, '-y',
+         '-i', str(video_path),
+         '-i', str(audio_source),
+         '-c:v', 'copy',
+         '-c:a', 'aac',
+         '-map', '0:v:0',
+         '-map', '1:a:0?',  # ? makes audio optional
+         '-shortest',
+         str(output_path)
+     ]
+
+     try:
+         subprocess.run(cmd, capture_output=True, check=True)
+     except subprocess.CalledProcessError:
+         # If no audio, just copy the video
+         import shutil
+         shutil.copy(video_path, output_path)
+
+     return output_path
+
+
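# End-to-end sketch: burn in the overlay, then copy the original clip's audio
# back onto the silent OpenCV output. Illustrative only; file names are
# placeholders:
#
#     from dji_telemetry.parser import parse_srt
#     from dji_telemetry.video import process_video, add_audio
#
#     telemetry = parse_srt("DJI_0001.SRT")
#     process_video("DJI_0001.MP4", telemetry, "overlay_noaudio.mp4")
#     add_audio("overlay_noaudio.mp4", "DJI_0001.MP4", "DJI_0001_final.mp4")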
273
+ def get_video_info(video_path: str | Path) -> dict:
274
+ """
275
+ Get video file information.
276
+
277
+ Args:
278
+ video_path: Path to video file
279
+
280
+ Returns:
281
+ Dictionary with video properties
282
+ """
283
+ video_path = Path(video_path)
284
+
285
+ cap = cv2.VideoCapture(str(video_path))
286
+ if not cap.isOpened():
287
+ raise IOError(f"Could not open video file: {video_path}")
288
+
289
+ info = {
290
+ 'width': int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
291
+ 'height': int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
292
+ 'fps': cap.get(cv2.CAP_PROP_FPS),
293
+ 'frame_count': int(cap.get(cv2.CAP_PROP_FRAME_COUNT)),
294
+ 'duration_seconds': int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) / cap.get(cv2.CAP_PROP_FPS),
295
+ 'fourcc': int(cap.get(cv2.CAP_PROP_FOURCC)),
296
+ }
297
+
298
+ cap.release()
299
+ return info
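The properties returned by get_video_info pair naturally with generate_overlay_video when the overlay should match the source clip. A minimal sketch, assuming the package installs as dji_telemetry (file names are placeholders):

    from dji_telemetry.parser import parse_srt
    from dji_telemetry.video import generate_overlay_video, get_video_info

    info = get_video_info("DJI_0001.MP4")
    telemetry = parse_srt("DJI_0001.SRT")
    generate_overlay_video(
        telemetry,
        "overlay.mp4",
        width=info['width'],
        height=info['height'],
        fps=info['fps'],
    )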