pypetkitapi 1.9.2__py3-none-any.whl → 1.9.4__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- pypetkitapi/__init__.py +5 -4
- pypetkitapi/client.py +201 -91
- pypetkitapi/command.py +14 -22
- pypetkitapi/const.py +39 -15
- pypetkitapi/exceptions.py +9 -0
- pypetkitapi/feeder_container.py +5 -6
- pypetkitapi/litter_container.py +7 -8
- pypetkitapi/media.py +423 -0
- pypetkitapi/schedule_container.py +67 -0
- pypetkitapi/utils.py +22 -0
- {pypetkitapi-1.9.2.dist-info → pypetkitapi-1.9.4.dist-info}/LICENSE +1 -1
- {pypetkitapi-1.9.2.dist-info → pypetkitapi-1.9.4.dist-info}/METADATA +12 -3
- pypetkitapi-1.9.4.dist-info/RECORD +18 -0
- {pypetkitapi-1.9.2.dist-info → pypetkitapi-1.9.4.dist-info}/WHEEL +1 -1
- pypetkitapi/medias.py +0 -199
- pypetkitapi-1.9.2.dist-info/RECORD +0 -16
pypetkitapi/const.py
CHANGED
@@ -35,6 +35,8 @@ K3 = "k3"
 PET = "pet"

 DEVICES_LITTER_BOX = [T3, T4, T5, T6]
+LITTER_WITH_CAMERA = [T5, T6]
+LITTER_NO_CAMERA = [T3, T4]
 DEVICES_FEEDER = [FEEDER, FEEDER_MINI, D3, D4, D4S, D4H, D4SH]
 DEVICES_WATER_FOUNTAIN = [W5, CTW3]
 DEVICES_PURIFIER = [K2]
@@ -68,12 +70,10 @@ class Header(StrEnum):
     ACCEPT = "*/*"
     ACCEPT_LANG = "en-US;q=1, it-US;q=0.9"
     ENCODING = "gzip, deflate"
-    API_VERSION = "11.
+    API_VERSION = "11.4.0"
     CONTENT_TYPE = "application/x-www-form-urlencoded"
     AGENT = "okhttp/3.12.11"
     CLIENT = f"{Client.PLATFORM_TYPE}({Client.OS_VERSION};{Client.MODEL_NAME})"
-    TIMEZONE = "1.0"
-    TIMEZONE_ID = "Europe/Paris" # TODO: Make this dynamic
     LOCALE = "en-US"
     IMG_VERSION = "1.0"
     HOUR = "24"
@@ -85,17 +85,28 @@ CLIENT_NFO = {
     "osVersion": Client.OS_VERSION.value,
     "platform": Client.PLATFORM_TYPE.value,
     "source": Client.SOURCE.value,
-    "timezone": Header.TIMEZONE.value, # TODO: Make this dynamic
-    "timezoneId": Header.TIMEZONE_ID.value,
     "version": Header.API_VERSION.value,
 }

 LOGIN_DATA = {
-    "
-    "oldVersion": Header.API_VERSION,
+    "oldVersion": Header.API_VERSION.value,
 }


+class MediaType(StrEnum):
+    """Record Type constants"""
+
+    VIDEO = "avi"
+    IMAGE = "jpg"
+
+
+class VideoType(StrEnum):
+    """Record Type constants"""
+
+    HIGHLIGHT = "highlight"
+    PLAYBACK = "playback"
+
+
 class RecordType(StrEnum):
     """Record Type constants"""

@@ -103,9 +114,16 @@ class RecordType(StrEnum):
     FEED = "feed"
     MOVE = "move"
     PET = "pet"
+    TOILETING = "toileting"


-RecordTypeLST = [
+RecordTypeLST = [
+    RecordType.EAT,
+    RecordType.FEED,
+    RecordType.MOVE,
+    RecordType.PET,
+    RecordType.TOILETING,
+]


 class PetkitEndpoint(StrEnum):
@@ -124,6 +142,7 @@ class PetkitEndpoint(StrEnum):
     GET_DEVICE_RECORD = "getDeviceRecord"
     GET_DEVICE_RECORD_RELEASE = "getDeviceRecordRelease"
     UPDATE_SETTING = "updateSettings"
+    UPDATE_SETTING_FEEDER_MINI = "update"

     # Bluetooth
     BLE_AS_RELAY = "ble/ownSupportBleDevices"
@@ -143,25 +162,30 @@ class PetkitEndpoint(StrEnum):
     GET_PET_OUT_GRAPH = "getPetOutGraph"

     # Video features
+    GET_M3U8 = "getM3u8"
     CLOUD_VIDEO = "cloud/video"
     GET_DOWNLOAD_M3U8 = "getDownloadM3u8"
-    GET_M3U8 = "getM3u8"

     # Feeders
     REPLENISHED_FOOD = "added"
     FRESH_ELEMENT_CALIBRATION = "food_reset"
     FRESH_ELEMENT_CANCEL_FEED = "cancel_realtime_feed"
-
-
-    FRESH_ELEMENT_DESICCANT_RESET = "feeder/desiccant_reset"
+    DESICCANT_RESET_OLD = "desiccant_reset"
+    DESICCANT_RESET_NEW = "desiccantReset"
     CALL_PET = "callPet"
     CANCEL_FEED = "cancelRealtimeFeed"
-
-
-    MANUAL_FEED_DUAL = "saveDailyFeed"
+    MANUAL_FEED_OLD = "save_dailyfeed" # For Feeder/FeederMini
+    MANUAL_FEED_NEW = "saveDailyFeed" # For all other feeders
     DAILY_FEED_AND_EAT = "dailyFeedAndEat" # D3
     FEED_STATISTIC = "feedStatistic" # D4
     DAILY_FEED = "dailyFeeds" # D4S
     REMOVE_DAILY_FEED = "removeDailyFeed"
     RESTORE_DAILY_FEED = "restoreDailyFeed"
     SAVE_FEED = "saveFeed" # For Feeding plan
+
+    # Schedule
+    SCHEDULE = "schedule/schedules"
+    SCHEDULE_SAVE = "schedule/save"
+    SCHEDULE_REMOVE = "schedule/remove"
+    SCHEDULE_COMPLETE = "schedule/complete"
+    SCHEDULE_HISTORY = "schedule/userHistorySchedules"
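For orientation, a minimal sketch of how the new constants behave. The values come straight from the hunks above; since these are `StrEnum` members, they compare equal to their string values, so existing string-based comparisons keep working:

```python
from pypetkitapi.const import MediaType, PetkitEndpoint, RecordType, RecordTypeLST

# StrEnum members are also plain strings
assert MediaType.VIDEO == "avi"
assert PetkitEndpoint.SCHEDULE == "schedule/schedules"

# The new toileting record type is included in the iterable record-type list
assert RecordType.TOILETING in RecordTypeLST
```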
pypetkitapi/exceptions.py
CHANGED
@@ -11,6 +11,10 @@ class PetkitTimeoutError(PypetkitError):
     """Class for PyPetkit timeout exceptions."""


+class PetkitSessionError(PypetkitError):
+    """Class for PyPetkit connection exceptions."""
+
+
 class PetkitSessionExpiredError(PypetkitError):
     """Class for PyPetkit connection exceptions."""

@@ -18,6 +22,11 @@ class PetkitSessionExpiredError(PypetkitError):
 class PetkitAuthenticationUnregisteredEmailError(PypetkitError):
     """Exception raised when the email is not registered with Petkit."""

+    def __init__(self, region: str):
+        """Initialize the exception."""
+        self.message = "The email you provided is not registered on Petkit's servers. Please check your email, or you are using the correct region."
+        super().__init__(self.message)
+

 class PetkitRegionalServerNotFoundError(PypetkitError):
     """Exception raised when the specified region server is not found."""
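A brief, hypothetical sketch of how a caller might react to the new exception types. The class names come from the hunks above; the `client.login()` call is only a placeholder for whatever login method you use:

```python
from pypetkitapi.exceptions import (
    PetkitAuthenticationUnregisteredEmailError,
    PetkitSessionError,
    PetkitSessionExpiredError,
)

async def login_with_retry(client) -> None:
    """Placeholder wrapper around an assumed `client.login()` call."""
    try:
        await client.login()
    except PetkitSessionExpiredError:
        # Session is stale: authenticate again
        await client.login()
    except PetkitSessionError:
        # New in this release: generic session/connection failure
        raise
    except PetkitAuthenticationUnregisteredEmailError:
        # Now raised with a descriptive message built in __init__
        raise
```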
pypetkitapi/feeder_container.py
CHANGED
@@ -49,7 +49,7 @@ class CameraMultiNew(BaseModel):
 
     enable: int | None = None
     rpt: str | None = None
-    time: list[
+    time: list[list[int]] | None = None


 class SettingsFeeder(BaseModel):
@@ -85,9 +85,7 @@ class SettingsFeeder(BaseModel):
     highlight: int | None = None
     light_config: int | None = Field(None, alias="lightConfig")
     light_mode: int | None = Field(None, alias="lightMode")
-    light_multi_range: list[
-        None, alias="lightMultiRange"
-    )
+    light_multi_range: list[list[int]] | None = Field(None, alias="lightMultiRange")
     live_encrypt: int | None = Field(None, alias="liveEncrypt")
     low_battery_notify: int | None = Field(None, alias="lowBatteryNotify")
     manual_lock: int | None = Field(None, alias="manualLock")
@@ -112,7 +110,7 @@ class SettingsFeeder(BaseModel):
     time_display: int | None = Field(None, alias="timeDisplay")
     tone_config: int | None = Field(None, alias="toneConfig")
     tone_mode: int | None = Field(None, alias="toneMode")
-    tone_multi_range: list[
+    tone_multi_range: list[list[int]] | None = Field(None, alias="toneMultiRange")
     upload: int | None = None
     volume: int | None = None

@@ -315,8 +313,9 @@ class Feeder(BaseModel):
     bt_mac: str | None = Field(None, alias="btMac")
     cloud_product: CloudProduct | None = Field(None, alias="cloudProduct")
     created_at: str | None = Field(None, alias="createdAt")
+    desc: str | None = None # D3
     device_records: FeederRecord | None = None
-    firmware:
+    firmware: str
     firmware_details: list[FirmwareDetail] | None = Field(None, alias="firmwareDetails")
     hardware: int
     id: int
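As an illustration of the tightened annotations, a small sketch of validating one of the aliased camera fields. It assumes Pydantic v2 (`model_validate`) and that the remaining fields can be left unset, as the optional defaults shown above suggest:

```python
from pypetkitapi.feeder_container import SettingsFeeder

# The camelCase alias maps onto the snake_case field; the value is now
# validated as a nested list of integers rather than a flat list.
settings = SettingsFeeder.model_validate({"lightMultiRange": [[480, 1200]]})
assert settings.light_multi_range == [[480, 1200]]
```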
pypetkitapi/litter_container.py
CHANGED
@@ -9,10 +9,9 @@ from pypetkitapi.const import (
     DEVICE_DATA,
     DEVICE_RECORDS,
     DEVICE_STATS,
+    LITTER_NO_CAMERA,
+    LITTER_WITH_CAMERA,
     T3,
-    T4,
-    T5,
-    T6,
     PetkitEndpoint,
 )
 from pypetkitapi.containers import CloudProduct, Device, FirmwareDetail, Wifi
@@ -240,9 +239,9 @@ class LitterRecord(BaseModel):
     @classmethod
     def get_endpoint(cls, device_type: str) -> str:
         """Get the endpoint URL for the given device type."""
-        if device_type in
+        if device_type in LITTER_NO_CAMERA:
             return PetkitEndpoint.GET_DEVICE_RECORD
-        if device_type in
+        if device_type in LITTER_WITH_CAMERA:
             return PetkitEndpoint.GET_DEVICE_RECORD_RELEASE
         raise ValueError(f"Invalid device type: {device_type}")

@@ -255,11 +254,11 @@ class LitterRecord(BaseModel):
     ) -> dict:
         """Generate query parameters including request_date."""
         device_type = device.device_type
-        if device_type in
+        if device_type in LITTER_NO_CAMERA:
             request_date = request_date or datetime.now().strftime("%Y%m%d")
             key = "day" if device_type == T3 else "date"
             return {key: int(request_date), "deviceId": device.device_id}
-        if device_type in
+        if device_type in LITTER_WITH_CAMERA:
             return {
                 "timestamp": int(datetime.now().timestamp()),
                 "deviceId": device.device_id,
@@ -407,7 +406,7 @@ class K3Device(BaseModel):
 
 
 class Litter(BaseModel):
     """Dataclass for Litter Data.
-    Supported devices = T3, T4, T6
+    Supported devices = T3, T4, T5, T6
     """

     data_type: ClassVar[str] = DEVICE_DATA
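A small sketch of the resulting dispatch, using only the constants and classmethod shown above (StrEnum members compare equal to their string values):

```python
from pypetkitapi.const import T3, T5, PetkitEndpoint
from pypetkitapi.litter_container import LitterRecord

# Litter boxes without a camera use the plain record endpoint,
# camera-equipped models use the "release" variant.
assert LitterRecord.get_endpoint(T3) == PetkitEndpoint.GET_DEVICE_RECORD
assert LitterRecord.get_endpoint(T5) == PetkitEndpoint.GET_DEVICE_RECORD_RELEASE
```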
pypetkitapi/media.py
ADDED
@@ -0,0 +1,423 @@
+"""Module to manage media files from PetKit devices."""
+
+from __future__ import annotations
+
+import asyncio
+from dataclasses import dataclass
+from datetime import datetime
+import logging
+from pathlib import Path
+from typing import Any
+from urllib.parse import parse_qs, urlparse
+
+from aiofiles import open as aio_open
+import aiohttp
+from Crypto.Cipher import AES
+from Crypto.Util.Padding import unpad
+
+from pypetkitapi import Feeder, Litter, PetKitClient, RecordType
+from pypetkitapi.const import D4H, D4SH, T5, T6, RecordTypeLST
+
+_LOGGER = logging.getLogger(__name__)
+
+
+@dataclass
+class MediaFile:
+    """Dataclass MediaFile.
+    Represents a media file from a PetKit device.
+    """
+
+    event_id: str
+    event_type: RecordType
+    device_id: int
+    user_id: str
+    image: str | None
+    video: str | None
+    filepath: str
+    aes_key: str
+    timestamp: int
+    is_available: bool = False
+
+
+class MediaManager:
+    """Class to manage media files from PetKit devices."""
+
+    async def get_all_media_files(
+        self, devices: list[Feeder | Litter]
+    ) -> list[MediaFile]:
+        """Get all media files from all devices and return a list of MediaFile."""
+        media_files: list[MediaFile] = []
+
+        for device in devices:
+            if isinstance(device, Feeder):
+                if device.device_nfo and device.device_nfo.device_type in [D4SH, D4H]:
+                    media_files.extend(self._process_feeder(device))
+                else:
+                    _LOGGER.debug(
+                        "Feeder %s does not support media file extraction",
+                        device.name,
+                    )
+            elif isinstance(device, Litter):
+                if device.device_nfo and device.device_nfo.device_type in [T5, T6]:
+                    media_files.extend(self._process_litter(device))
+                else:
+                    _LOGGER.debug(
+                        "Litter %s does not support media file extraction",
+                        device.name,
+                    )
+
+        return media_files
+
+    def _process_feeder(self, feeder: Feeder) -> list[MediaFile]:
+        """Process media files for a Feeder device."""
+        media_files: list[MediaFile] = []
+        records = feeder.device_records
+
+        device_id = (
+            feeder.device_nfo.device_id
+            if feeder.device_nfo and feeder.device_nfo.device_type
+            else None
+        )
+        if device_id is None:
+            raise ValueError("Missing device ID for feeder")
+
+        if not records:
+            return media_files
+
+        for record_type in RecordTypeLST:
+            record_list = getattr(records, record_type, [])
+            for record in record_list:
+                media_files.extend(
+                    self._process_feeder_record(
+                        record, RecordType(record_type), device_id
+                    )
+                )
+
+        return media_files
+
+    def _process_feeder_record(
+        self, record, record_type: RecordType, device_id: int
+    ) -> list[MediaFile]:
+        """Process individual feeder records."""
+        media_files: list[MediaFile] = []
+        user_id = record.user_id
+
+        if not record.items:
+            return media_files
+
+        for item in record.items:
+            timestamp = self._get_timestamp(item)
+            date_str = (
+                datetime.fromtimestamp(timestamp).strftime("%Y%m%d")
+                if timestamp
+                else "unknown"
+            )
+            if not item.event_id:
+                _LOGGER.error("Missing event_id for record item")
+                continue
+            if not user_id:
+                _LOGGER.error("Missing user_id for record item")
+                continue
+            if not item.aes_key:
+                _LOGGER.error("Missing aes_key for record item")
+                continue
+            if timestamp is None:
+                _LOGGER.error("Missing timestamp for record item")
+                continue
+
+            filepath = f"{device_id}/{date_str}/{record_type.name.lower()}"
+            media_files.append(
+                MediaFile(
+                    event_id=item.event_id,
+                    event_type=record_type,
+                    device_id=device_id,
+                    user_id=user_id,
+                    image=item.preview,
+                    video=self.construct_video_url(item.media_api, user_id),
+                    filepath=filepath,
+                    aes_key=item.aes_key,
+                    timestamp=self._get_timestamp(item),
+                )
+            )
+        return media_files
+
+    def _process_litter(self, litter: Litter) -> list[MediaFile]:
+        """Process media files for a Litter device."""
+        media_files: list[MediaFile] = []
+        records = litter.device_records
+
+        if not records:
+            return media_files
+
+        for record in records:
+            timestamp = record.timestamp or None
+            date_str = (
+                datetime.fromtimestamp(timestamp).strftime("%Y%m%d")
+                if timestamp
+                else "unknown"
+            )
+            if not record.event_id:
+                _LOGGER.error("Missing event_id for record item")
+                continue
+            if not record.device_id:
+                _LOGGER.error("Missing event_id for record item")
+                continue
+            if not record.user_id:
+                _LOGGER.error("Missing user_id for record item")
+                continue
+            if not record.aes_key:
+                _LOGGER.error("Missing aes_key for record item")
+                continue
+            if record.timestamp is None:
+                _LOGGER.error("Missing timestamp for record item")
+                continue
+
+            filepath = f"{record.device_id}/{date_str}/toileting"
+            media_files.append(
+                MediaFile(
+                    event_id=record.event_id,
+                    event_type=RecordType.TOILETING,
+                    device_id=record.device_id,
+                    user_id=record.user_id,
+                    image=record.preview,
+                    video=self.construct_video_url(record.media_api, record.user_id),
+                    filepath=filepath,
+                    aes_key=record.aes_key,
+                    timestamp=record.timestamp,
+                )
+            )
+        return media_files
+
+    @staticmethod
+    def construct_video_url(media_url: str | None, user_id: str | None) -> str | None:
+        """Construct the video URL."""
+        if not media_url or not user_id:
+            return None
+        params = parse_qs(urlparse(media_url).query)
+        param_dict = {k: v[0] for k, v in params.items()}
+        return f"/d4sh/cloud/video?startTime={param_dict.get("startTime")}&deviceId={param_dict.get("deviceId")}&userId={user_id}&mark={param_dict.get("mark")}"
+
+    @staticmethod
+    def _get_timestamp(item) -> int:
+        """Extract timestamp from a record item and raise an exception if it is None."""
+        timestamp = (
+            item.timestamp
+            or item.completed_at
+            or item.eat_start_time
+            or item.eat_end_time
+            or item.start_time
+            or item.end_time
+            or item.time
+            or None
+        )
+        if timestamp is None:
+            raise ValueError("Can't find timestamp in record item")
+        return timestamp
+
+
+class DownloadDecryptMedia:
+    """Class to download and decrypt media files from PetKit devices."""
+
+    file_data: MediaFile
+
+    def __init__(self, download_path: Path, client: PetKitClient):
+        """Initialize the class."""
+        self.download_path = download_path
+        self.client = client
+
+    async def get_fpath(self, file_name: str) -> Path:
+        """Return the full path of the file."""
+        subdir = ""
+        if file_name.endswith(".jpg"):
+            subdir = "snapshot"
+        elif file_name.endswith(".avi"):
+            subdir = "video"
+        return Path(self.download_path / self.file_data.filepath / subdir / file_name)
+
+    async def download_file(self, file_data: MediaFile) -> None:
+        """Get image and video file"""
+        self.file_data = file_data
+
+        if self.file_data.image:
+            # Download image file
+            await self._get_file(
+                self.file_data.image,
+                self.file_data.aes_key,
+                f"{self.file_data.event_id}.jpg",
+            )
+
+        if self.file_data.video:
+            # Download video file
+            await self._get_video_m3u8()
+
+    async def _get_video_m3u8(self) -> None:
+        """Iterate through m3u8 file and return all the ts file urls"""
+        aes_key, iv_key, segments_lst = await self._get_m3u8_segments()
+
+        segment_files = []
+        for index, segment in enumerate(segments_lst, start=1):
+            segment_file = await self._get_file(
+                segment, aes_key, f"{index}_{self.file_data.event_id}.avi"
+            )
+            if segment_file:
+                segment_files.append(
+                    await self.get_fpath(f"{index}_{self.file_data.event_id}.avi")
+                )
+
+        if len(segment_files) > 1:
+            _LOGGER.debug("Concatenating segments %s", len(segment_files))
+            await self._concat_segments(segment_files, f"{self.file_data.event_id}.avi")
+        elif len(segment_files) == 1:
+            _LOGGER.debug("Single file segment, no need to concatenate")
+
+    async def _get_m3u8_segments(self) -> tuple[str, str, list[str]]:
+        """Extract the segments from a m3u8 file.
+        :return: Tuple of AES key, IV key, and list of segment URLs
+        """
+        if not self.file_data.video:
+            raise ValueError("Missing video URL")
+        video_data = await self.client.get_cloud_video(self.file_data.video)
+
+        media_api = video_data.get("mediaApi", None)
+        if not media_api:
+            _LOGGER.error("Missing mediaApi in video data")
+            raise ValueError("Missing mediaApi in video data")
+        return await self.client.extract_segments_m3u8(str(media_api))
+
+    async def _get_file(self, url: str, aes_key: str, full_filename: str) -> bool:
+        """Download a file from a URL and decrypt it."""
+
+        full_file_path = await self.get_fpath(full_filename)
+        if full_file_path.exists():
+            _LOGGER.debug("File already exist : %s don't re-download it", full_filename)
+            return True
+
+        # Download the file
+        async with aiohttp.ClientSession() as session, session.get(url) as response:
+            if response.status != 200:
+                _LOGGER.error(
+                    "Failed to download %s, status code: %s", url, response.status
+                )
+                return False
+
+            content = await response.read()
+
+        encrypted_file_path = await self._save_file(content, f"{full_filename}.enc")
+        # Decrypt the image
+        decrypted_data = await self._decrypt_file(encrypted_file_path, aes_key)
+
+        if decrypted_data:
+            _LOGGER.debug("Decrypt was successful")
+            await self._save_file(decrypted_data, full_filename)
+            return True
+        return False
+
+    async def _save_file(self, content: bytes, filename: str) -> Path:
+        """Save content to a file asynchronously and return the file path."""
+        file_path = await self.get_fpath(filename)
+        try:
+            # Ensure the directory exists
+            file_path.parent.mkdir(parents=True, exist_ok=True)
+
+            async with aio_open(file_path, "wb") as file:
+                await file.write(content)
+            _LOGGER.debug("Save file OK : %s", file_path)
+        except PermissionError as e:
+            _LOGGER.error("Save file, permission denied %s: %s", file_path, e)
+        except FileNotFoundError as e:
+            _LOGGER.error("Save file, file/folder not found %s: %s", file_path, e)
+        except OSError as e:
+            _LOGGER.error("Save file, error saving file %s: %s", file_path, e)
+        except Exception as e: # noqa: BLE001
+            _LOGGER.error(
+                "Save file, unexpected error saving file %s: %s", file_path, e
+            )
+        return file_path
+
+    @staticmethod
+    async def _decrypt_file(file_path: Path, aes_key: str) -> bytes | None:
+        """Decrypt a file using AES encryption.
+        :param file_path: Path to the encrypted file.
+        :param aes_key: AES key used for decryption.
+        :return: Decrypted bytes data.
+        """
+        aes_key = aes_key.removesuffix("\n")
+        key_bytes: bytes = aes_key.encode("utf-8")
+        iv: bytes = b"\x61" * 16
+        cipher: Any = AES.new(key_bytes, AES.MODE_CBC, iv)
+
+        async with aio_open(file_path, "rb") as encrypted_file:
+            encrypted_data: bytes = await encrypted_file.read()
+
+        decrypted_data: bytes = cipher.decrypt(encrypted_data)
+
+        try:
+            decrypted_data = unpad(decrypted_data, AES.block_size)
+        except ValueError as e:
+            _LOGGER.debug("Warning: Padding error occurred, ignoring error: %s", e)
+
+        if Path(file_path).exists():
+            Path(file_path).unlink()
+        return decrypted_data
+
+    async def _concat_segments(self, ts_files: list[Path], output_file):
+        """Concatenate a list of .ts segments into a single output file without using a temporary file.
+
+        :param ts_files: List of absolute paths of .ts files
+        :param output_file: Path of the output file (e.g., "output.mp4")
+        """
+        full_output_file = await self.get_fpath(output_file)
+        if full_output_file.exists():
+            _LOGGER.debug(
+                "Output file already exists: %s, skipping concatenation.", output_file
+            )
+            return
+
+        # Build the argument for `ffmpeg` with the files formatted for the command line
+        concat_input = "|".join(str(file) for file in ts_files)
+        command = [
+            "ffmpeg",
+            "-i",
+            f"concat:{concat_input}",
+            "-c",
+            "copy",
+            "-bsf:a",
+            "aac_adtstoasc",
+            str(full_output_file),
+        ]
+
+        try:
+            # Run the subprocess asynchronously
+            process = await asyncio.create_subprocess_exec(
+                *command,
+                stdout=asyncio.subprocess.PIPE,
+                stderr=asyncio.subprocess.PIPE,
+            )
+            stdout, stderr = await process.communicate()
+
+            if process.returncode == 0:
+                _LOGGER.debug("File successfully concatenated: %s", full_output_file)
+                await self._delete_segments(ts_files)
+            else:
+                _LOGGER.error(
+                    "Error during concatenation: %s\nStdout: %s\nStderr: %s",
+                    process.returncode,
+                    stdout.decode().strip(),
+                    stderr.decode().strip(),
+                )
+        except FileNotFoundError as e:
+            _LOGGER.error("Error during concatenation: %s", e)
+        except OSError as e:
+            _LOGGER.error("OS error during concatenation: %s", e)
+
+    async def _delete_segments(self, ts_files: list[Path]) -> None:
+        """Delete all segment files after concatenation."""
+        for file in ts_files:
+            if file.exists():
+                try:
+                    file.unlink()
+                    _LOGGER.debug("Deleted segment file: %s", file)
+                except OSError as e:
+                    _LOGGER.debug("Error deleting segment file %s: %s", file, e)
+            else:
+                _LOGGER.debug("Segment file not found: %s", file)
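To show how the new module fits together, a hedged usage sketch based only on the signatures above. The authenticated `client` (a `PetKitClient`) and the `devices` list of Feeder/Litter objects are assumed to come from the rest of the library, and `./petkit_media` is an arbitrary download directory:

```python
import asyncio
from pathlib import Path

from pypetkitapi.media import DownloadDecryptMedia, MediaManager

async def download_all_media(client, devices) -> None:
    """Collect MediaFile entries for all supported devices and fetch them."""
    manager = MediaManager()
    downloader = DownloadDecryptMedia(Path("./petkit_media"), client)
    for media_file in await manager.get_all_media_files(devices):
        await downloader.download_file(media_file)

# asyncio.run(download_all_media(client, devices))
```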