pypetkitapi 1.10.3__py3-none-any.whl → 1.11.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pypetkitapi/__init__.py +1 -1
- pypetkitapi/bluetooth.py +31 -11
- pypetkitapi/client.py +7 -3
- pypetkitapi/const.py +1 -1
- pypetkitapi/media.py +137 -112
- {pypetkitapi-1.10.3.dist-info → pypetkitapi-1.11.0.dist-info}/METADATA +1 -1
- {pypetkitapi-1.10.3.dist-info → pypetkitapi-1.11.0.dist-info}/RECORD +9 -9
- {pypetkitapi-1.10.3.dist-info → pypetkitapi-1.11.0.dist-info}/LICENSE +0 -0
- {pypetkitapi-1.10.3.dist-info → pypetkitapi-1.11.0.dist-info}/WHEEL +0 -0
pypetkitapi/__init__.py
CHANGED
pypetkitapi/bluetooth.py
CHANGED
@@ -77,10 +77,10 @@ class BluetoothManager:
         _LOGGER.info("Opening BLE connection to fountain %s", fountain_id)
         water_fountain = await self._get_fountain_instance(fountain_id)
         if await self.check_relay_availability(fountain_id) is False:
-            _LOGGER.error("BLE relay not available.")
+            _LOGGER.error("BLE relay not available (id: %s).", fountain_id)
             return False
         if water_fountain.is_connected is True:
-            _LOGGER.error("BLE connection already established
+            _LOGGER.error("BLE connection already established (id %s)", fountain_id)
             return True
         response = await self.client.req.request(
             method=HTTPMethod.POST,
@@ -89,11 +89,16 @@ class BluetoothManager:
             headers=await self.client.get_session_id(),
         )
         if response != {"state": 1}:
-            _LOGGER.error("
+            _LOGGER.error("Unable to open a BLE connection (id %s)", fountain_id)
             water_fountain.is_connected = False
             return False
         for attempt in range(BLE_CONNECT_ATTEMPT):
-            _LOGGER.
+            _LOGGER.debug(
+                "BLE connection... %s/%s (id %s)",
+                attempt,
+                BLE_CONNECT_ATTEMPT,
+                fountain_id,
+            )
             response = await self.client.req.request(
                 method=HTTPMethod.POST,
                 url=PetkitEndpoint.BLE_POLL,
@@ -101,14 +106,20 @@ class BluetoothManager:
                 headers=await self.client.get_session_id(),
             )
             if response == 1:
-                _LOGGER.info(
+                _LOGGER.info(
+                    "BLE connection established successfully (id %s)", fountain_id
+                )
                 water_fountain.is_connected = True
                 water_fountain.last_ble_poll = datetime.now().strftime(
                     "%Y-%m-%dT%H:%M:%S.%f"
                 )
                 return True
             await asyncio.sleep(4)
-        _LOGGER.error(
+        _LOGGER.error(
+            "Failed to establish BLE connection after %s attempts (id %s)",
+            BLE_CONNECT_ATTEMPT,
+            fountain_id,
+        )
         water_fountain.is_connected = False
         return False
 
@@ -116,13 +127,20 @@ class BluetoothManager:
         """Close the BLE connection to the given fountain_id."""
         _LOGGER.info("Closing BLE connection to fountain %s", fountain_id)
         water_fountain = await self._get_fountain_instance(fountain_id)
+
+        if water_fountain.is_connected is False:
+            _LOGGER.error(
+                "BLE connection not established. Cannot close (id %s)", fountain_id
+            )
+            return
+
         await self.client.req.request(
             method=HTTPMethod.POST,
             url=PetkitEndpoint.BLE_CANCEL,
             data={"bleId": fountain_id, "type": 24, "mac": water_fountain.mac},
             headers=await self.client.get_session_id(),
         )
-        _LOGGER.info("BLE connection closed successfully
+        _LOGGER.info("BLE connection closed successfully (id %s)", fountain_id)
 
     async def get_ble_cmd_data(
         self, fountain_command: list, counter: int
@@ -146,11 +164,13 @@ class BluetoothManager:
         _LOGGER.info("Sending BLE command to fountain %s", fountain_id)
         water_fountain = await self._get_fountain_instance(fountain_id)
         if water_fountain.is_connected is False:
-            _LOGGER.error("BLE connection not established
+            _LOGGER.error("BLE connection not established (id %s)", fountain_id)
             return False
         command_data = FOUNTAIN_COMMAND.get(command)
         if command_data is None:
-            _LOGGER.error(
+            _LOGGER.error(
+                "BLE fountain command '%s' not found (id %s)", command, fountain_id
+            )
             return False
         cmd_code, cmd_data = await self.get_ble_cmd_data(
             list(command_data), water_fountain.ble_counter
@@ -168,7 +188,7 @@ class BluetoothManager:
             headers=await self.client.get_session_id(),
         )
         if response != 1:
-            _LOGGER.error("Failed to send BLE command
+            _LOGGER.error("Failed to send BLE command (id %s)", fountain_id)
             return False
-        _LOGGER.info("BLE command sent successfully
+        _LOGGER.info("BLE command sent successfully (id %s)", fountain_id)
         return True
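The bluetooth.py changes above are mostly logging (every message now carries the fountain id) plus a new is_connected guard before BLE_CANCEL. As a reading aid, here is a minimal, self-contained sketch of the poll-and-retry pattern those new log lines wrap; the constant value and the injected poll callable are assumptions, not the library's exact code.

import asyncio
import logging
from collections.abc import Awaitable, Callable

_LOGGER = logging.getLogger(__name__)

BLE_CONNECT_ATTEMPT = 4  # assumed value; the real constant lives in pypetkitapi.const


async def poll_until_connected(
    poll: Callable[[], Awaitable[int]], fountain_id: int
) -> bool:
    """Poll the BLE relay until it reports 1 (connected) or attempts run out."""
    for attempt in range(BLE_CONNECT_ATTEMPT):
        _LOGGER.debug(
            "BLE connection... %s/%s (id %s)", attempt, BLE_CONNECT_ATTEMPT, fountain_id
        )
        if await poll() == 1:
            _LOGGER.info("BLE connection established successfully (id %s)", fountain_id)
            return True
        await asyncio.sleep(4)  # the diff sleeps 4 s between polls
    _LOGGER.error(
        "Failed to establish BLE connection after %s attempts (id %s)",
        BLE_CONNECT_ATTEMPT,
        fountain_id,
    )
    return False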
pypetkitapi/client.py
CHANGED
@@ -101,8 +101,12 @@ class PetKitClient:
         self.bluetooth_manager = BluetoothManager(self)
         from pypetkitapi import MediaManager
 
+        from . import __version__
+
         self.media_manager = MediaManager()
 
+        _LOGGER.debug("PetKit Client initialized (version %s)", __version__)
+
     async def _get_base_url(self) -> None:
         """Get the list of API servers, filter by region, and return the matching server."""
         _LOGGER.debug("Getting API server list")
@@ -337,7 +341,7 @@ class PetKitClient:
         _LOGGER.debug("Fetching media data for device: %s", device.device_id)
 
         device_entity = self.petkit_entities[device.device_id]
-        device_entity.medias = await self.media_manager.
+        device_entity.medias = await self.media_manager.gather_all_media_from_cloud(
             [device_entity]
         )
 
@@ -518,8 +522,8 @@ class PetKitClient:
             headers=await self.get_session_id(),
         )
         if not isinstance(response, list) or not response:
-            _LOGGER.
-                "No video data found from cloud, looks like you don't have a care+ subscription ?"
+            _LOGGER.warning(
+                "No video data found from cloud, looks like you don't have a care+ subscription ? or video is not uploaded yet."
            )
            return None
        return response[0]
pypetkitapi/const.py
CHANGED
pypetkitapi/media.py
CHANGED
@@ -65,7 +65,7 @@ class MediaManager:
 
     media_table: list[MediaFile] = []
 
-    async def
+    async def gather_all_media_from_cloud(
         self, devices: list[Feeder | Litter]
     ) -> list[MediaCloud]:
         """Get all media files from all devices and return a list of MediaCloud.
@@ -81,7 +81,7 @@
                     device.device_nfo
                     and device.device_nfo.device_type in FEEDER_WITH_CAMERA
                 ):
-                    media_files.extend(self._process_feeder(device))
+                    media_files.extend(await self._process_feeder(device))
                 else:
                     _LOGGER.debug(
                         "Feeder %s does not support media file extraction",
@@ -92,7 +92,7 @@
                     device.device_nfo
                     and device.device_nfo.device_type in LITTER_WITH_CAMERA
                 ):
-                    media_files.extend(self._process_litter(device))
+                    media_files.extend(await self._process_litter(device))
                 else:
                     _LOGGER.debug(
                         "Litter %s does not support media file extraction",
@@ -101,9 +101,9 @@
 
         return media_files
 
-    async def
+    async def gather_all_media_from_disk(
         self, storage_path: Path, device_id: int
-    ) ->
+    ) -> list[MediaFile]:
         """Construct the media file table for disk storage.
         :param storage_path: Path to the storage directory
         :param device_id: Device ID
@@ -113,29 +113,33 @@
         today_str = datetime.now().strftime("%Y%m%d")
         base_path = storage_path / str(device_id) / today_str
 
+        _LOGGER.debug("Populating files from directory %s", base_path)
+
         for record_type in RecordType:
             record_path = base_path / record_type
             snapshot_path = record_path / "snapshot"
             video_path = record_path / "video"
 
             # Regex pattern to match valid filenames
-            valid_pattern = re.compile(
+            valid_pattern = re.compile(
+                rf"^{device_id}_\d+\.({MediaType.IMAGE}|{MediaType.VIDEO})$"
+            )
 
             # Populate the media table with event_id from filenames
             for subdir in [snapshot_path, video_path]:
 
                 # Ensure the directories exist
                 if not await aiofiles.os.path.exists(subdir):
-                    _LOGGER.debug("
+                    _LOGGER.debug("Path does not exist, skip : %s", subdir)
                     continue
 
-                _LOGGER.debug("Scanning
+                _LOGGER.debug("Scanning files into : %s", subdir)
                 entries = await aiofiles.os.scandir(subdir)
                 for entry in entries:
                     if entry.is_file() and valid_pattern.match(entry.name):
-                        _LOGGER.debug("
+                        _LOGGER.debug("Media found: %s", entry.name)
                         event_id = Path(entry.name).stem
-                        timestamp =
+                        timestamp = Path(entry.name).stem.split("_")[1]
                         media_type_str = Path(entry.name).suffix.lstrip(".")
                         try:
                             media_type = MediaType(media_type_str)
@@ -152,19 +156,10 @@
                             media_type=MediaType(media_type),
                         )
                     )
+        _LOGGER.debug("OK, Media table populated with %s files", len(self.media_table))
+        return self.media_table
 
-
-    def _extract_timestamp(file_name: str) -> int:
-        """Extract timestamp from a filename.
-        :param file_name: Filename
-        :return: Timestamp
-        """
-        match = re.search(r"_(\d+)\.[a-zA-Z0-9]+$", file_name)
-        if match:
-            return int(match.group(1))
-        return 0
-
-    async def prepare_missing_files(
+    async def list_missing_files(
         self,
         media_cloud_list: list[MediaCloud],
         dl_type: list[MediaType] | None = None,
@@ -176,17 +171,29 @@
         :param event_type: List of event types to filter
         :return: List of missing MediaCloud objects
         """
-        missing_media = []
+        missing_media: list[MediaCloud] = []
         existing_event_ids = {media_file.event_id for media_file in self.media_table}
 
+        if dl_type is None or event_type is None or not dl_type or not event_type:
+            _LOGGER.debug(
+                "Missing dl_type or event_type parameters, no media file will be downloaded"
+            )
+            return missing_media
+
         for media_cloud in media_cloud_list:
-            # Skip if event type is not in the filter
+            # Skip if event type is not in the event filter
             if event_type and media_cloud.event_type not in event_type:
+                _LOGGER.debug(
+                    "Skipping event type %s, is filtered", media_cloud.event_type
+                )
                 continue
 
             # Check if the media file is missing
             is_missing = False
             if media_cloud.event_id not in existing_event_ids:
+                _LOGGER.debug(
+                    "Media file IMG/VIDEO id : %s are missing", media_cloud.event_id
+                )
                 is_missing = True  # Both image and video are missing
             else:
                 # Check for missing image
@@ -200,6 +207,9 @@
                         for media_file in self.media_table
                     )
                 ):
+                    _LOGGER.debug(
+                        "Media file IMG id : %s is missing", media_cloud.event_id
+                    )
                     is_missing = True
                 # Check for missing video
                 if (
@@ -212,14 +222,16 @@
                         for media_file in self.media_table
                     )
                 ):
+                    _LOGGER.debug(
+                        "Media file VIDEO id : %s is missing", media_cloud.event_id
+                    )
                     is_missing = True
 
             if is_missing:
                 missing_media.append(media_cloud)
-
         return missing_media
 
-    def _process_feeder(self, feeder: Feeder) -> list[MediaCloud]:
+    async def _process_feeder(self, feeder: Feeder) -> list[MediaCloud]:
         """Process media files for a Feeder device.
         :param feeder: Feeder device object
         :return: List of MediaCloud objects for the device
@@ -235,12 +247,14 @@
             record_list = getattr(records, record_type, [])
             for record in record_list:
                 media_files.extend(
-                    self._process_feeder_record(
+                    await self._process_feeder_record(
+                        record, RecordType(record_type), feeder
+                    )
                 )
 
         return media_files
 
-    def _process_feeder_record(
+    async def _process_feeder_record(
         self, record, record_type: RecordType, device_obj: Feeder
     ) -> list[MediaCloud]:
         """Process individual feeder records.
@@ -267,15 +281,15 @@
             return media_files
 
         for item in record.items:
-            timestamp = self._get_timestamp(item)
-
-
-
-                else "unknown"
-            )
+            timestamp = await self._get_timestamp(item)
+            if timestamp is None:
+                _LOGGER.error("Missing timestamp for record item")
+                continue
             if not item.event_id:
                 # Skip feed event in the future
-                _LOGGER.debug(
+                _LOGGER.debug(
+                    "Missing event_id for record item (probably a feed event not yet completed)"
+                )
                 continue
             if not user_id:
                 _LOGGER.error("Missing user_id for record item")
@@ -283,10 +297,8 @@
             if not item.aes_key:
                 _LOGGER.error("Missing aes_key for record item")
                 continue
-            if timestamp is None:
-                _LOGGER.error("Missing timestamp for record item")
-                continue
 
+            date_str = await self.get_date_from_ts(timestamp)
             filepath = f"{feeder_id}/{date_str}/{record_type.name.lower()}"
             media_files.append(
                 MediaCloud(
@@ -295,17 +307,17 @@
                     device_id=feeder_id,
                     user_id=user_id,
                     image=item.preview,
-                    video=self.construct_video_url(
+                    video=await self.construct_video_url(
                         device_type, item.media_api, user_id, cp_sub
                     ),
                     filepath=filepath,
                     aes_key=item.aes_key,
-                    timestamp=
+                    timestamp=timestamp,
                 )
             )
         return media_files
 
-    def _process_litter(self, litter: Litter) -> list[MediaCloud]:
+    async def _process_litter(self, litter: Litter) -> list[MediaCloud]:
         """Process media files for a Litter device.
         :param litter: Litter device object
         :return: List of MediaCloud objects for the device
@@ -333,22 +345,19 @@
             return media_files
 
         for record in records:
-            timestamp = record.timestamp or None
-            date_str = (
-                datetime.fromtimestamp(timestamp).strftime("%Y%m%d")
-                if timestamp
-                else "unknown"
-            )
             if not record.event_id:
-                _LOGGER.
+                _LOGGER.debug("Missing event_id for record item")
                 continue
             if not record.aes_key:
-                _LOGGER.
+                _LOGGER.debug("Missing aes_key for record item")
                 continue
             if record.timestamp is None:
-                _LOGGER.
+                _LOGGER.debug("Missing timestamp for record item")
                 continue
 
+            timestamp = record.timestamp or None
+            date_str = await self.get_date_from_ts(timestamp)
+
             filepath = f"{litter_id}/{date_str}/toileting"
             media_files.append(
                 MediaCloud(
@@ -357,7 +366,7 @@
                     device_id=litter_id,
                     user_id=user_id,
                     image=record.preview,
-                    video=self.construct_video_url(
+                    video=await self.construct_video_url(
                         device_type, record.media_api, user_id, cp_sub
                     ),
                     filepath=filepath,
@@ -368,7 +377,17 @@
         return media_files
 
     @staticmethod
-    def
+    async def get_date_from_ts(timestamp: int | None) -> str:
+        """Get date from timestamp.
+        :param timestamp: Timestamp
+        :return: Date string
+        """
+        if not timestamp:
+            return "unknown"
+        return datetime.fromtimestamp(timestamp).strftime("%Y%m%d")
+
+    @staticmethod
+    async def construct_video_url(
         device_type: str | None, media_url: str | None, user_id: int, cp_sub: int | None
     ) -> str | None:
         """Construct the video URL.
@@ -385,7 +404,7 @@
         return f"/{device_type}/cloud/video?startTime={param_dict.get("startTime")}&deviceId={param_dict.get("deviceId")}&userId={user_id}&mark={param_dict.get("mark")}"
 
     @staticmethod
-    def _get_timestamp(item) -> int:
+    async def _get_timestamp(item) -> int:
         """Extract timestamp from a record item and raise an exception if it is None.
         :param item: Record item
         :return: Timestamp
@@ -421,58 +440,79 @@ class DownloadDecryptMedia:
         :return: Full path of the file.
         """
         subdir = ""
-        if file_name.endswith(
+        if file_name.endswith(MediaType.IMAGE):
             subdir = "snapshot"
-        elif file_name.endswith(
+        elif file_name.endswith(MediaType.VIDEO):
             subdir = "video"
         return Path(self.download_path / self.file_data.filepath / subdir / file_name)
 
     async def download_file(
-        self, file_data: MediaCloud, file_type: MediaType | None
+        self, file_data: MediaCloud, file_type: list[MediaType] | None
     ) -> None:
-        """Get image and video
-        :param file_data: MediaCloud object
-        :param file_type: MediaType object
-        """
-        _LOGGER.debug("Downloading media file %s", file_data.event_id)
+        """Get image and video files."""
         self.file_data = file_data
+        if not file_type:
+            file_type = []
+        filename = f"{self.file_data.device_id}_{self.file_data.timestamp}"
+
+        if self.file_data.image and MediaType.IMAGE in file_type:
+            full_filename = f"{filename}.{MediaType.IMAGE}"
+            if await self.not_existing_file(full_filename):
+                # Image download
+                _LOGGER.debug("Download image file (event id: %s)", file_data.event_id)
+                await self._get_file(
+                    self.file_data.image,
+                    self.file_data.aes_key,
+                    f"{self.file_data.device_id}_{self.file_data.timestamp}.{MediaType.IMAGE}",
+                )
 
-        if self.file_data.
-
-
-
-            self.
-            f"{self.file_data.event_id}.jpg",
-        )
+        if self.file_data.video and MediaType.VIDEO in file_type:
+            if await self.not_existing_file(f"{filename}.{MediaType.VIDEO}"):
+                # Video download
+                _LOGGER.debug("Download video file (event id: %s)", file_data.event_id)
+                await self._get_video_m3u8()
 
-
-
-
+    async def not_existing_file(self, file_name: str) -> bool:
+        """Check if the file already exists.
+        :param file_name: Filename
+        :return: True if the file exists, False otherwise.
+        """
+        full_file_path = await self.get_fpath(file_name)
+        if full_file_path.exists():
+            _LOGGER.debug(
+                "File already exist : %s don't re-download it", full_file_path
+            )
+            return False
+        return True
 
     async def _get_video_m3u8(self) -> None:
-        """Iterate through m3u8 file and return all the ts file
+        """Iterate through m3u8 file and return all the ts file URLs."""
         aes_key, iv_key, segments_lst = await self._get_m3u8_segments()
+        file_name = (
+            f"{self.file_data.device_id}_{self.file_data.timestamp}.{MediaType.VIDEO}"
+        )
 
         if aes_key is None or iv_key is None or not segments_lst:
-            _LOGGER.debug("Can't download video file %s",
+            _LOGGER.debug("Can't download video file %s", file_name)
             return
 
-        segment_files = []
-
         if len(segments_lst) == 1:
-            await self._get_file(
-                segments_lst[0], aes_key, f"{self.file_data.event_id}.avi"
-            )
+            await self._get_file(segments_lst[0], aes_key, file_name)
             return
 
-
-
-
-            )
-
-
-
-
+        # Download segments in parallel
+        tasks = [
+            self._get_file(segment, aes_key, f"{index}_{file_name}")
+            for index, segment in enumerate(segments_lst, start=1)
+        ]
+        results = await asyncio.gather(*tasks)
+
+        # Collect successful downloads
+        segment_files = [
+            await self.get_fpath(f"{index + 1}_{file_name}")
+            for index, success in enumerate(results)
+            if success
+        ]
 
         if not segment_files:
             _LOGGER.error("No segment files found")
@@ -480,7 +520,7 @@ class DownloadDecryptMedia:
             _LOGGER.debug("Single file segment, no need to concatenate")
         elif len(segment_files) > 1:
             _LOGGER.debug("Concatenating segments %s", len(segment_files))
-            await self._concat_segments(segment_files,
+            await self._concat_segments(segment_files, file_name)
 
     async def _get_m3u8_segments(self) -> tuple[str | None, str | None, list[str]]:
         """Extract the segments from a m3u8 file.
@@ -506,12 +546,6 @@
         :param full_filename: Name of the file to save.
         :return: True if the file was downloaded successfully, False otherwise.
         """
-
-        full_file_path = await self.get_fpath(full_filename)
-        if full_file_path.exists():
-            _LOGGER.debug("File already exist : %s don't re-download it", full_filename)
-            return True
-
         # Download the file
         async with aiohttp.ClientSession() as session, session.get(url) as response:
             if response.status != 200:
@@ -520,11 +554,8 @@
                 )
                 return False
 
-
-
-            encrypted_file_path = await self._save_file(content, f"{full_filename}.enc")
-            # Decrypt the image
-            decrypted_data = await self._decrypt_file(encrypted_file_path, aes_key)
+            encrypted_data = await response.read()
+            decrypted_data = await self._decrypt_data(encrypted_data, aes_key)
 
         if decrypted_data:
             _LOGGER.debug("Decrypt was successful")
@@ -559,9 +590,9 @@
         return file_path
 
     @staticmethod
-    async def
+    async def _decrypt_data(encrypted_data: bytes, aes_key: str) -> bytes | None:
         """Decrypt a file using AES encryption.
-        :param
+        :param encrypted_data: Encrypted bytes data.
         :param aes_key: AES key used for decryption.
         :return: Decrypted bytes data.
         """
@@ -569,26 +600,19 @@
         key_bytes: bytes = aes_key.encode("utf-8")
         iv: bytes = b"\x61" * 16
         cipher: Any = AES.new(key_bytes, AES.MODE_CBC, iv)
-
-        async with aio_open(file_path, "rb") as encrypted_file:
-            encrypted_data: bytes = await encrypted_file.read()
-
         decrypted_data: bytes = cipher.decrypt(encrypted_data)
 
         try:
             decrypted_data = unpad(decrypted_data, AES.block_size)
         except ValueError as e:
             _LOGGER.debug("Warning: Padding error occurred, ignoring error: %s", e)
-
-        if Path(file_path).exists():
-            Path(file_path).unlink()
         return decrypted_data
 
     async def _concat_segments(self, ts_files: list[Path], output_file) -> None:
-        """Concatenate a list of .
+        """Concatenate a list of .mp4 segments into a single output file without using a temporary file.
 
-        :param ts_files: List of absolute paths of .
-        :param output_file: Path of the output file (e.g., "output.
+        :param ts_files: List of absolute paths of .mp4 files
+        :param output_file: Path of the output file (e.g., "output.mp4")
         """
         full_output_file = await self.get_fpath(output_file)
         if full_output_file.exists():
@@ -635,9 +659,10 @@
         except OSError as e:
             _LOGGER.error("OS error during concatenation: %s", e)
 
-
+    @staticmethod
+    async def _delete_segments(ts_files: list[Path]) -> None:
         """Delete all segment files after concatenation.
-        :param ts_files: List of absolute paths of .
+        :param ts_files: List of absolute paths of .mp4 files
         """
         for file in ts_files:
             if file.exists():
{pypetkitapi-1.10.3.dist-info → pypetkitapi-1.11.0.dist-info}/RECORD
CHANGED
@@ -1,19 +1,19 @@
-pypetkitapi/__init__.py,sha256=
-pypetkitapi/bluetooth.py,sha256=
-pypetkitapi/client.py,sha256=
+pypetkitapi/__init__.py,sha256=shPCRXZDzB8-Pofzju8FSV6FKX8m8F_vPfW9woBu_DQ,2107
+pypetkitapi/bluetooth.py,sha256=eu6c2h6YHBafAhhSSy4As6tn29i5WbOH9tZzRlMm44U,7843
+pypetkitapi/client.py,sha256=wzZQUHg_ee6lmdAjli6zS7qw_sgPER9iLfcTZe4VTH4,27190
 pypetkitapi/command.py,sha256=cMCUutZCQo9Ddvjl_FYR5UjU_CqFz1iyetMznYwjpzM,7500
-pypetkitapi/const.py,sha256=
+pypetkitapi/const.py,sha256=W0cWpBvOySEaPvVAnQHLeIWYEqKG051mVNv-qsfjo7I,4609
 pypetkitapi/containers.py,sha256=F_uyDBD0a5QD4s_ArjYiKTAAg1XHYBvmV_lEnO9RQ-U,4786
 pypetkitapi/exceptions.py,sha256=4BXUyYXLfZjNxdnOGJPjyE9ASIl7JmQphjws87jvHtE,1631
 pypetkitapi/feeder_container.py,sha256=PhidWd5WpsZqtdKZy60PzE67YXgQfApjm8CqvMCHK3U,14743
 pypetkitapi/litter_container.py,sha256=KWvHNAOJ6hDSeJ_55tqtzY9GxHtd9gAntPkbnVbdb-I,19275
-pypetkitapi/media.py,sha256=
+pypetkitapi/media.py,sha256=BW6WHhGGn7hxdZvN27Rcg6vDu4NXB2Q_nTa8arCvacg,25687
 pypetkitapi/purifier_container.py,sha256=ssyIxhNben5XJ4KlQTXTrtULg2ji6DqHqjzOq08d1-I,2491
 pypetkitapi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pypetkitapi/schedule_container.py,sha256=OjLAY6FY-g14JNJJnYMNFV5ZtdkjUzNBit1VUiiZKnQ,2053
 pypetkitapi/utils.py,sha256=z7325kcJQUburnF28HSXrJMvY_gY9007K73Zwxp-4DQ,743
 pypetkitapi/water_fountain_container.py,sha256=5J0b-fDZYcFLNX2El7fifv8H6JMhBCt-ttxSow1ozRQ,6787
-pypetkitapi-1.
-pypetkitapi-1.
-pypetkitapi-1.
-pypetkitapi-1.
+pypetkitapi-1.11.0.dist-info/LICENSE,sha256=u5jNkZEn6YMrtN4Kr5rU3TcBJ5-eAt0qMx4JDsbsnzM,1074
+pypetkitapi-1.11.0.dist-info/METADATA,sha256=Xhjy59tO-gmZZodLmf6LDbu0L57IOqFgHL6CdDGfZu4,6256
+pypetkitapi-1.11.0.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
+pypetkitapi-1.11.0.dist-info/RECORD,,
{pypetkitapi-1.10.3.dist-info → pypetkitapi-1.11.0.dist-info}/LICENSE
File without changes
{pypetkitapi-1.10.3.dist-info → pypetkitapi-1.11.0.dist-info}/WHEEL
File without changes