pypetkitapi 1.9.3__py3-none-any.whl → 1.10.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pypetkitapi/containers.py CHANGED
@@ -99,6 +99,15 @@ class Pet(BaseModel):
99
99
  self.name = self.name or self.pet_name
100
100
 
101
101
 
102
class UserDevice(BaseModel):
    """Owner/user information as attached to a device payload.

    Nested under device models such as Feeder and Litter (their ``user``
    field); not a subclass of any device class.
    """

    id: int | None = None
    nick: str | None = None
110
+
102
111
  class User(BaseModel):
103
112
  """Dataclass for user data.
104
113
  Subclass of AccountData.
pypetkitapi/exceptions.py CHANGED
@@ -11,6 +11,10 @@ class PetkitTimeoutError(PypetkitError):
11
11
  """Class for PyPetkit timeout exceptions."""
12
12
 
13
13
 
14
class PetkitSessionError(PypetkitError):
    """Class for PyPetkit session exceptions.

    Distinct from PetkitSessionExpiredError, which covers expired sessions.
    """
17
+
14
18
  class PetkitSessionExpiredError(PypetkitError):
15
19
  """Class for PyPetkit connection exceptions."""
16
20
 
@@ -18,6 +22,11 @@ class PetkitSessionExpiredError(PypetkitError):
18
22
class PetkitAuthenticationUnregisteredEmailError(PypetkitError):
    """Exception raised when the email is not registered with Petkit."""

    def __init__(self) -> None:
        """Initialize the exception with a fixed explanatory message."""
        # Message grammar fixed ("...or you are using the correct region"
        # was not a well-formed instruction).
        self.message = (
            "The email you provided is not registered on Petkit's servers. "
            "Please check your email, or make sure you are using the correct region."
        )
        super().__init__(self.message)
29
+
21
30
 
22
31
  class PetkitRegionalServerNotFoundError(PypetkitError):
23
32
  """Exception raised when the specified region server is not found."""
@@ -14,7 +14,13 @@ from pypetkitapi.const import (
14
14
  FEEDER_MINI,
15
15
  PetkitEndpoint,
16
16
  )
17
- from pypetkitapi.containers import CloudProduct, Device, FirmwareDetail, Wifi
17
+ from pypetkitapi.containers import (
18
+ CloudProduct,
19
+ Device,
20
+ FirmwareDetail,
21
+ UserDevice,
22
+ Wifi,
23
+ )
18
24
 
19
25
 
20
26
  class FeedItem(BaseModel):
@@ -335,7 +341,9 @@ class Feeder(BaseModel):
335
341
  sn: str
336
342
  state: StateFeeder | None = None
337
343
  timezone: float | None = None
344
+ user: UserDevice | None = None
338
345
  device_nfo: Device | None = None
346
+ medias: list | None = None
339
347
 
340
348
  @classmethod
341
349
  def get_endpoint(cls, device_type: str) -> str:
@@ -14,7 +14,13 @@ from pypetkitapi.const import (
14
14
  T3,
15
15
  PetkitEndpoint,
16
16
  )
17
- from pypetkitapi.containers import CloudProduct, Device, FirmwareDetail, Wifi
17
+ from pypetkitapi.containers import (
18
+ CloudProduct,
19
+ Device,
20
+ FirmwareDetail,
21
+ UserDevice,
22
+ Wifi,
23
+ )
18
24
 
19
25
 
20
26
  class SettingsLitter(BaseModel):
@@ -445,10 +451,12 @@ class Litter(BaseModel):
445
451
  service_status: int | None = Field(None, alias="serviceStatus")
446
452
  total_time: int | None = Field(None, alias="totalTime")
447
453
  with_k3: int | None = Field(None, alias="withK3")
454
+ User: UserDevice | None = None
448
455
  device_records: list[LitterRecord] | None = None
449
456
  device_stats: LitterStats | None = None
450
457
  device_pet_graph_out: list[PetOutGraph] | None = None
451
458
  device_nfo: Device | None = None
459
+ medias: list | None = None
452
460
 
453
461
  @classmethod
454
462
  def get_endpoint(cls, device_type: str) -> str:
pypetkitapi/media.py ADDED
@@ -0,0 +1,592 @@
1
+ """Module to manage media files from PetKit devices."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import asyncio
6
+ from dataclasses import dataclass
7
+ from datetime import datetime
8
+ import logging
9
+ from pathlib import Path
10
+ import re
11
+ from typing import Any
12
+ from urllib.parse import parse_qs, urlparse
13
+
14
+ import aiofiles
15
+ from aiofiles import open as aio_open
16
+ import aiofiles.os
17
+ import aiohttp
18
+ from Crypto.Cipher import AES
19
+ from Crypto.Util.Padding import unpad
20
+
21
+ from pypetkitapi import Feeder, Litter, PetKitClient, RecordType
22
+ from pypetkitapi.const import (
23
+ FEEDER_WITH_CAMERA,
24
+ LITTER_WITH_CAMERA,
25
+ MediaType,
26
+ RecordTypeLST,
27
+ )
28
+
29
+ _LOGGER = logging.getLogger(__name__)
30
+
31
+
32
@dataclass
class MediaCloud:
    """Describe one media event as reported by the PetKit cloud API.

    Carries the cloud-side URLs together with the AES key material needed
    to download and decrypt the associated snapshot and/or video.
    """

    event_id: str
    event_type: RecordType
    device_id: int
    user_id: int
    image: str | None
    video: str | None
    filepath: str
    aes_key: str
    timestamp: int
47
+
48
+
49
@dataclass
class MediaFile:
    """Describe one media file already present on local disk.

    Built while scanning the download directory; mirrors a MediaCloud
    event that has been (at least partially) downloaded.
    """

    event_id: str
    device_id: int
    timestamp: int
    media_type: MediaType
    event_type: RecordType
    full_file_path: Path
61
+
62
+
63
class MediaManager:
    """Class to manage media files from PetKit devices.

    Builds MediaCloud lists from device records, indexes what is already
    on disk, and computes which media still needs downloading.
    """

    # NOTE(review): class-level mutable attribute — shared by every
    # MediaManager instance. Kept for backward compatibility; callers
    # appear to use a single manager and get_all_media_files_disk()
    # clears it before each scan.
    media_table: list[MediaFile] = []

    async def get_all_media_files(
        self, devices: list[Feeder | Litter]
    ) -> list[MediaCloud]:
        """Get all media files from all devices and return a list of MediaCloud."""
        media_files: list[MediaCloud] = []
        _LOGGER.debug("Processing media files for %s devices", len(devices))

        for device in devices:
            if isinstance(device, Feeder):
                if (
                    device.device_nfo
                    and device.device_nfo.device_type in FEEDER_WITH_CAMERA
                ):
                    media_files.extend(self._process_feeder(device))
                else:
                    _LOGGER.debug(
                        "Feeder %s does not support media file extraction",
                        device.name,
                    )
            elif isinstance(device, Litter):
                if (
                    device.device_nfo
                    and device.device_nfo.device_type in LITTER_WITH_CAMERA
                ):
                    media_files.extend(self._process_litter(device))
                else:
                    _LOGGER.debug(
                        "Litter %s does not support media file extraction",
                        device.name,
                    )

        return media_files

    async def get_all_media_files_disk(
        self, storage_path: Path, device_id: int
    ) -> None:
        """Construct the media file table for today's on-disk storage.

        Scans <storage_path>/<device_id>/<YYYYMMDD>/<record_type>/{snapshot,video}
        and repopulates self.media_table from the filenames found.
        """
        self.media_table.clear()

        today_str = datetime.now().strftime("%Y%m%d")
        base_path = storage_path / str(device_id) / today_str

        # Regex pattern to match valid filenames ("<device_id>_<ts>.jpg|avi").
        # Hoisted out of the loop: it does not depend on the record type.
        valid_pattern = re.compile(rf"^{device_id}_\d+\.(jpg|avi)$")

        for record_type in RecordType:
            record_path = base_path / record_type

            # Populate the media table with event_id from filenames
            for subdir in (record_path / "snapshot", record_path / "video"):
                # Ensure the directory exists
                if not await aiofiles.os.path.exists(subdir):
                    _LOGGER.debug("Skip, path does not exist, %s", subdir)
                    continue

                _LOGGER.debug("Scanning directory %s", subdir)
                entries = await aiofiles.os.scandir(subdir)
                for entry in entries:
                    if not (entry.is_file() and valid_pattern.match(entry.name)):
                        continue
                    _LOGGER.debug("Entries found: %s", entry.name)
                    stem = Path(entry.name).stem  # "<device_id>_<timestamp>"
                    media_type_str = Path(entry.name).suffix.lstrip(".")
                    try:
                        media_type = MediaType(media_type_str)
                    except ValueError:
                        _LOGGER.warning("Unknown media type: %s", media_type_str)
                        continue
                    self.media_table.append(
                        MediaFile(
                            event_id=stem,
                            device_id=device_id,
                            timestamp=int(stem.split("_", 1)[1]),
                            event_type=RecordType(record_type),
                            full_file_path=subdir / entry.name,
                            media_type=media_type,
                        )
                    )

    async def prepare_missing_files(
        self,
        media_cloud_list: list[MediaCloud],
        dl_type: list[MediaType] | None = None,
        event_type: list[RecordType] | None = None,
    ) -> list[MediaCloud]:
        """Return the MediaCloud objects not fully present in media_table.

        :param media_cloud_list: cloud events to check.
        :param dl_type: media types to consider (default: image and video).
        :param event_type: if given, only these record types are considered.
        """
        missing_media: list[MediaCloud] = []
        wanted_types = dl_type or [MediaType.IMAGE, MediaType.VIDEO]
        # Precompute (event_id, media_type) pairs once instead of rescanning
        # media_table for every cloud item (was O(n*m)).
        on_disk = {(mf.event_id, mf.media_type) for mf in self.media_table}
        existing_event_ids = {event_id for event_id, _ in on_disk}

        for media_cloud in media_cloud_list:
            # Skip if event type is not in the filter
            if event_type and media_cloud.event_type not in event_type:
                continue

            if media_cloud.event_id not in existing_event_ids:
                # Both image and video are missing
                missing_media.append(media_cloud)
                continue

            image_missing = bool(
                media_cloud.image
                and MediaType.IMAGE in wanted_types
                and (media_cloud.event_id, MediaType.IMAGE) not in on_disk
            )
            video_missing = bool(
                media_cloud.video
                and MediaType.VIDEO in wanted_types
                and (media_cloud.event_id, MediaType.VIDEO) not in on_disk
            )
            if image_missing or video_missing:
                missing_media.append(media_cloud)

        return missing_media

    def _process_feeder(self, feeder: Feeder) -> list[MediaCloud]:
        """Process media files for a Feeder device."""
        media_files: list[MediaCloud] = []
        records = feeder.device_records

        if not records:
            _LOGGER.debug("No records found for %s", feeder.name)
            return media_files

        for record_type in RecordTypeLST:
            record_list = getattr(records, record_type, [])
            for record in record_list:
                media_files.extend(
                    self._process_feeder_record(record, RecordType(record_type), feeder)
                )

        return media_files

    def _process_feeder_record(
        self, record, record_type: RecordType, device_obj: Feeder
    ) -> list[MediaCloud]:
        """Process individual feeder records into MediaCloud objects."""
        media_files: list[MediaCloud] = []
        user_id = device_obj.user.id if device_obj.user else None
        feeder_id = device_obj.device_nfo.device_id if device_obj.device_nfo else None
        device_type = (
            device_obj.device_nfo.device_type if device_obj.device_nfo else None
        )
        cp_sub = (
            device_obj.cloud_product.subscribe if device_obj.cloud_product else None
        )

        if not feeder_id:
            _LOGGER.error("Missing feeder_id for record")
            return media_files

        if not record.items:
            return media_files

        for item in record.items:
            # BUGFIX: _get_timestamp raises ValueError when no timestamp
            # field is populated, so the previous `if timestamp is None`
            # guard was dead code and one bad item aborted the whole scan.
            # It was also called twice per item; compute it once and skip
            # the item on failure.
            try:
                timestamp = self._get_timestamp(item)
            except ValueError:
                _LOGGER.error("Missing timestamp for record item")
                continue
            date_str = datetime.fromtimestamp(timestamp).strftime("%Y%m%d")
            if not item.event_id:
                # Skip feed event in the future
                _LOGGER.debug("Missing event_id for record item")
                continue
            if not user_id:
                _LOGGER.error("Missing user_id for record item")
                continue
            if not item.aes_key:
                _LOGGER.error("Missing aes_key for record item")
                continue

            filepath = f"{feeder_id}/{date_str}/{record_type.name.lower()}"
            media_files.append(
                MediaCloud(
                    event_id=item.event_id,
                    event_type=record_type,
                    device_id=feeder_id,
                    user_id=user_id,
                    image=item.preview,
                    video=self.construct_video_url(
                        device_type, item.media_api, user_id, cp_sub
                    ),
                    filepath=filepath,
                    aes_key=item.aes_key,
                    timestamp=timestamp,
                )
            )
        return media_files

    def _process_litter(self, litter: Litter) -> list[MediaCloud]:
        """Process media files for a Litter device."""
        media_files: list[MediaCloud] = []
        records = litter.device_records
        litter_id = litter.device_nfo.device_id if litter.device_nfo else None
        device_type = litter.device_nfo.device_type if litter.device_nfo else None
        user_id = litter.user.id if litter.user else None
        cp_sub = litter.cloud_product.subscribe if litter.cloud_product else None

        if not litter_id:
            _LOGGER.error("Missing litter_id for record")
            return media_files

        if not device_type:
            _LOGGER.error("Missing device_type for record")
            return media_files

        if not user_id:
            _LOGGER.error("Missing user_id for record")
            return media_files

        if not records:
            return media_files

        for record in records:
            if not record.event_id:
                _LOGGER.error("Missing event_id for record item")
                continue
            if not record.aes_key:
                _LOGGER.error("Missing aes_key for record item")
                continue
            if record.timestamp is None:
                _LOGGER.error("Missing timestamp for record item")
                continue
            date_str = datetime.fromtimestamp(record.timestamp).strftime("%Y%m%d")

            filepath = f"{litter_id}/{date_str}/toileting"
            media_files.append(
                MediaCloud(
                    event_id=record.event_id,
                    event_type=RecordType.TOILETING,
                    device_id=litter_id,
                    user_id=user_id,
                    image=record.preview,
                    video=self.construct_video_url(
                        device_type, record.media_api, user_id, cp_sub
                    ),
                    filepath=filepath,
                    aes_key=record.aes_key,
                    timestamp=record.timestamp,
                )
            )
        return media_files

    @staticmethod
    def construct_video_url(
        device_type: str | None, media_url: str | None, user_id: int, cp_sub: int | None
    ) -> str | None:
        """Construct the cloud video URL from the record's media URL.

        Returns None when there is no media URL, no user, or no active
        cloud subscription (cp_sub != 1).
        """
        if not media_url or not user_id or cp_sub != 1:
            return None
        params = parse_qs(urlparse(media_url).query)
        param_dict = {key: values[0] for key, values in params.items()}
        # BUGFIX: the released code nested double quotes inside a
        # double-quoted f-string, which is a SyntaxError on Python < 3.12.
        return (
            f"/{device_type}/cloud/video"
            f"?startTime={param_dict.get('startTime')}"
            f"&deviceId={param_dict.get('deviceId')}"
            f"&userId={user_id}"
            f"&mark={param_dict.get('mark')}"
        )

    @staticmethod
    def _get_timestamp(item) -> int:
        """Extract a timestamp from a record item.

        :raises ValueError: if every candidate timestamp field is unset
            (falsy values collapse to None via the trailing ``or None``).
        """
        timestamp = (
            item.timestamp
            or item.completed_at
            or item.eat_start_time
            or item.eat_end_time
            or item.start_time
            or item.end_time
            or item.time
            or None
        )
        if timestamp is None:
            raise ValueError("Can't find timestamp in record item")
        return timestamp
365
+
366
+
367
class DownloadDecryptMedia:
    """Class to download and decrypt media files from PetKit devices.

    Downloads AES-encrypted snapshots and m3u8-segmented videos, decrypts
    them, and (for multi-segment videos) concatenates them with ffmpeg.
    """

    # Current event being processed; set by download_file() before any
    # helper that reads it is called.
    file_data: MediaCloud

    def __init__(self, download_path: Path, client: PetKitClient):
        """Initialize with the local download root and an API client."""
        self.download_path = download_path
        self.client = client

    async def get_fpath(self, file_name: str) -> Path:
        """Return the full local path for a media file name.

        Files are routed into a "snapshot" or "video" subdirectory based on
        their extension; unknown extensions land directly in the filepath.
        """
        subdir = ""
        if file_name.endswith(".jpg"):
            subdir = "snapshot"
        elif file_name.endswith(".avi"):
            subdir = "video"
        return Path(self.download_path / self.file_data.filepath / subdir / file_name)

    async def download_file(
        self, file_data: MediaCloud, file_type: MediaType | None
    ) -> None:
        """Download the image and/or video for one media event.

        :param file_data: cloud event to fetch.
        :param file_type: restrict to IMAGE or VIDEO; None fetches both.
        """
        _LOGGER.debug("Downloading media file %s", file_data.event_id)
        self.file_data = file_data

        if self.file_data.image and (file_type is None or file_type == MediaType.IMAGE):
            # Download image file
            await self._get_file(
                self.file_data.image,
                self.file_data.aes_key,
                f"{self.file_data.event_id}.jpg",
            )

        if self.file_data.video and (file_type is None or file_type == MediaType.VIDEO):
            # Download video file
            await self._get_video_m3u8()

    async def _get_video_m3u8(self) -> None:
        """Download all .ts segments of the event's video and assemble them.

        Single-segment videos are saved directly; multi-segment videos are
        downloaded individually and then concatenated via ffmpeg.
        """
        aes_key, iv_key, segments_lst = await self._get_m3u8_segments()

        # iv_key is only used as a sanity signal here; decryption itself
        # uses a fixed IV (see _decrypt_file).
        if aes_key is None or iv_key is None or not segments_lst:
            _LOGGER.debug("Can't download video file %s", self.file_data.event_id)
            return

        segment_files = []

        if len(segments_lst) == 1:
            # Fast path: one segment is the whole video.
            await self._get_file(
                segments_lst[0], aes_key, f"{self.file_data.event_id}.avi"
            )
            return

        # Segments are 1-indexed so filenames sort in playback order.
        for index, segment in enumerate(segments_lst, start=1):
            segment_file = await self._get_file(
                segment, aes_key, f"{index}_{self.file_data.event_id}.avi"
            )
            if segment_file:
                segment_files.append(
                    await self.get_fpath(f"{index}_{self.file_data.event_id}.avi")
                )

        if not segment_files:
            _LOGGER.error("No segment files found")
        elif len(segment_files) == 1:
            _LOGGER.debug("Single file segment, no need to concatenate")
        elif len(segment_files) > 1:
            _LOGGER.debug("Concatenating segments %s", len(segment_files))
            await self._concat_segments(segment_files, f"{self.file_data.event_id}.avi")

    async def _get_m3u8_segments(self) -> tuple[str | None, str | None, list[str]]:
        """Extract the segments from a m3u8 file.

        :return: Tuple of AES key, IV key, and list of segment URLs.
        :raises ValueError: if the event has no video URL or the cloud
            response lacks the "mediaApi" field.
        """
        if not self.file_data.video:
            raise ValueError("Missing video URL")
        video_data = await self.client.get_cloud_video(self.file_data.video)

        if not video_data:
            return None, None, []

        media_api = video_data.get("mediaApi", None)
        if not media_api:
            _LOGGER.error("Missing mediaApi in video data")
            raise ValueError("Missing mediaApi in video data")
        return await self.client.extract_segments_m3u8(str(media_api))

    async def _get_file(self, url: str, aes_key: str, full_filename: str) -> bool:
        """Download one encrypted file, decrypt it, and save the result.

        :return: True if the file already existed or was decrypted and
            saved; False on download or decryption failure.
        """
        full_file_path = await self.get_fpath(full_filename)
        # Skip re-downloading files that are already on disk.
        if full_file_path.exists():
            _LOGGER.debug("File already exist : %s don't re-download it", full_filename)
            return True

        # Download the file
        async with aiohttp.ClientSession() as session, session.get(url) as response:
            if response.status != 200:
                _LOGGER.error(
                    "Failed to download %s, status code: %s", url, response.status
                )
                return False

            content = await response.read()

        # Persist the ciphertext first (".enc"), then decrypt from disk.
        encrypted_file_path = await self._save_file(content, f"{full_filename}.enc")
        # Decrypt the image
        decrypted_data = await self._decrypt_file(encrypted_file_path, aes_key)

        if decrypted_data:
            _LOGGER.debug("Decrypt was successful")
            await self._save_file(decrypted_data, full_filename)
            return True
        return False

    async def _save_file(self, content: bytes, filename: str) -> Path:
        """Save content to a file asynchronously and return the file path.

        Errors are logged, not raised; the returned path may not exist if
        the write failed.
        """
        file_path = await self.get_fpath(filename)
        try:
            # Ensure the directory exists
            file_path.parent.mkdir(parents=True, exist_ok=True)

            async with aio_open(file_path, "wb") as file:
                await file.write(content)
            _LOGGER.debug("Save file OK : %s", file_path)
        except PermissionError as e:
            _LOGGER.error("Save file, permission denied %s: %s", file_path, e)
        except FileNotFoundError as e:
            _LOGGER.error("Save file, file/folder not found %s: %s", file_path, e)
        except OSError as e:
            _LOGGER.error("Save file, error saving file %s: %s", file_path, e)
        except Exception as e:  # noqa: BLE001
            _LOGGER.error(
                "Save file, unexpected error saving file %s: %s", file_path, e
            )
        return file_path

    @staticmethod
    async def _decrypt_file(file_path: Path, aes_key: str) -> bytes | None:
        """Decrypt a file using AES encryption.

        :param file_path: Path to the encrypted file (deleted afterwards).
        :param aes_key: AES key used for decryption.
        :return: Decrypted bytes data.
        """
        # Keys sometimes arrive with a trailing newline; strip it.
        aes_key = aes_key.removesuffix("\n")
        key_bytes: bytes = aes_key.encode("utf-8")
        # Fixed IV of 16 'a' bytes (0x61) — matches what the PetKit cloud
        # uses for these files, not a per-file IV.
        iv: bytes = b"\x61" * 16
        cipher: Any = AES.new(key_bytes, AES.MODE_CBC, iv)

        async with aio_open(file_path, "rb") as encrypted_file:
            encrypted_data: bytes = await encrypted_file.read()

        decrypted_data: bytes = cipher.decrypt(encrypted_data)

        try:
            decrypted_data = unpad(decrypted_data, AES.block_size)
        except ValueError as e:
            # Some payloads are not padded; keep the raw decrypted bytes.
            _LOGGER.debug("Warning: Padding error occurred, ignoring error: %s", e)

        # Remove the temporary ".enc" ciphertext file.
        if Path(file_path).exists():
            Path(file_path).unlink()
        return decrypted_data

    async def _concat_segments(self, ts_files: list[Path], output_file: str) -> None:
        """Concatenate a list of .ts segments into a single output file without using a temporary file.

        Requires the ``ffmpeg`` binary on PATH; segments are deleted on
        success.

        :param ts_files: List of absolute paths of .ts files
        :param output_file: Path of the output file (e.g., "output.mp4")
        """
        full_output_file = await self.get_fpath(output_file)
        if full_output_file.exists():
            _LOGGER.debug(
                "Output file already exists: %s, skipping concatenation.", output_file
            )
            await self._delete_segments(ts_files)
            return

        # Build the argument for `ffmpeg` with the files formatted for the command line
        concat_input = "|".join(str(file) for file in ts_files)
        command = [
            "ffmpeg",
            "-i",
            f"concat:{concat_input}",
            "-c",
            "copy",
            "-bsf:a",
            "aac_adtstoasc",
            str(full_output_file),
        ]

        try:
            # Run the subprocess asynchronously
            process = await asyncio.create_subprocess_exec(
                *command,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
            )
            stdout, stderr = await process.communicate()

            if process.returncode == 0:
                _LOGGER.debug("File successfully concatenated: %s", full_output_file)
                await self._delete_segments(ts_files)
            else:
                _LOGGER.error(
                    "Error during concatenation: %s\nStdout: %s\nStderr: %s",
                    process.returncode,
                    stdout.decode().strip(),
                    stderr.decode().strip(),
                )
        except FileNotFoundError as e:
            # Typically: ffmpeg binary not installed / not on PATH.
            _LOGGER.error("Error during concatenation: %s", e)
        except OSError as e:
            _LOGGER.error("OS error during concatenation: %s", e)

    async def _delete_segments(self, ts_files: list[Path]) -> None:
        """Delete all segment files after concatenation."""
        for file in ts_files:
            if file.exists():
                try:
                    file.unlink()
                    _LOGGER.debug("Deleted segment file: %s", file)
                except OSError as e:
                    _LOGGER.debug("Error deleting segment file %s: %s", file, e)
            else:
                _LOGGER.debug("Segment file not found: %s", file)