mpytool 2.0.0__py3-none-any.whl → 2.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mpytool/conn.py +29 -0
- mpytool/conn_serial.py +92 -5
- mpytool/mpy.py +1063 -78
- mpytool/mpy_comm.py +140 -11
- mpytool/mpytool.py +1041 -393
- mpytool/terminal.py +1 -1
- mpytool/utils.py +6 -6
- mpytool-2.2.0.dist-info/METADATA +462 -0
- mpytool-2.2.0.dist-info/RECORD +16 -0
- {mpytool-2.0.0.dist-info → mpytool-2.2.0.dist-info}/WHEEL +1 -1
- mpytool-2.0.0.dist-info/METADATA +0 -233
- mpytool-2.0.0.dist-info/RECORD +0 -16
- {mpytool-2.0.0.dist-info → mpytool-2.2.0.dist-info}/entry_points.txt +0 -0
- {mpytool-2.0.0.dist-info → mpytool-2.2.0.dist-info}/licenses/LICENSE +0 -0
- {mpytool-2.0.0.dist-info → mpytool-2.2.0.dist-info}/top_level.txt +0 -0
mpytool/mpy.py
CHANGED
|
@@ -1,5 +1,8 @@
|
|
|
1
1
|
"""MicroPython tool: main MPY class"""
|
|
2
2
|
|
|
3
|
+
import base64
|
|
4
|
+
import zlib
|
|
5
|
+
|
|
3
6
|
import mpytool.mpy_comm as _mpy_comm
|
|
4
7
|
|
|
5
8
|
|
|
@@ -32,90 +35,104 @@ class DirNotFound(PathNotFound):
|
|
|
32
35
|
|
|
33
36
|
class Mpy():
|
|
34
37
|
_CHUNK = 512
|
|
38
|
+
_CHUNK_AUTO_DETECTED = None # Will be set on first put() if not overridden
|
|
39
|
+
_DEFLATE_AVAILABLE = None # None = not checked, True/False = result
|
|
35
40
|
_ATTR_DIR = 0x4000
|
|
36
41
|
_ATTR_FILE = 0x8000
|
|
42
|
+
# Helper functions for MicroPython device
|
|
43
|
+
# Using _mt_ prefix (mpytool) to avoid collisions, short var names to minimize transfer
|
|
37
44
|
_HELPERS = {
|
|
38
45
|
'stat': f"""
|
|
39
|
-
def
|
|
46
|
+
def _mt_stat(p):
|
|
40
47
|
try:
|
|
41
|
-
|
|
42
|
-
if
|
|
43
|
-
|
|
44
|
-
if res[0] == {_ATTR_FILE}:
|
|
45
|
-
return res[6]
|
|
46
|
-
except:
|
|
47
|
-
return None
|
|
48
|
-
return None
|
|
48
|
+
s=os.stat(p)
|
|
49
|
+
return -1 if s[0]=={_ATTR_DIR} else s[6] if s[0]=={_ATTR_FILE} else None
|
|
50
|
+
except:return None
|
|
49
51
|
""",
|
|
50
52
|
'tree': f"""
|
|
51
|
-
def
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
elif attr == {_ATTR_DIR}:
|
|
62
|
-
if path in ('', '/'):
|
|
63
|
-
sub_path = path + name
|
|
64
|
-
else:
|
|
65
|
-
sub_path = path + '/' + name
|
|
66
|
-
_sub_path, sub_dir_size, sub_tree = _mpytool_tree(sub_path)
|
|
67
|
-
res_dir.append((name, sub_dir_size, sub_tree))
|
|
68
|
-
dir_size += sub_dir_size
|
|
69
|
-
return path, dir_size, res_dir + res_file
|
|
53
|
+
def _mt_tree(p):
|
|
54
|
+
D,F,sz=[],[],0
|
|
55
|
+
for e in os.ilistdir(p):
|
|
56
|
+
n,a=e[:2]
|
|
57
|
+
if a=={_ATTR_FILE}:
|
|
58
|
+
F.append((n,e[3],None));sz+=e[3]
|
|
59
|
+
elif a=={_ATTR_DIR}:
|
|
60
|
+
_,s,t=_mt_tree((p+'/'if p not in('','/')else p)+n)
|
|
61
|
+
D.append((n,s,t));sz+=s
|
|
62
|
+
return p,sz,D+F
|
|
70
63
|
""",
|
|
71
64
|
'mkdir': f"""
|
|
72
|
-
def
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
if
|
|
78
|
-
check_path += '/'
|
|
79
|
-
check_path += dir_part
|
|
80
|
-
if found:
|
|
65
|
+
def _mt_mkdir(p):
|
|
66
|
+
p=p.rstrip('/');c='';f=1
|
|
67
|
+
for d in p.split('/'):
|
|
68
|
+
if not d:c='/';continue
|
|
69
|
+
c='/'+d if c=='/' else (c+'/'+d if c else d)
|
|
70
|
+
if f:
|
|
81
71
|
try:
|
|
82
|
-
|
|
83
|
-
if result[0] == {_ATTR_FILE}:
|
|
84
|
-
return True
|
|
72
|
+
if os.stat(c)[0]=={_ATTR_FILE}:return 1
|
|
85
73
|
continue
|
|
86
|
-
except:
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
return False
|
|
74
|
+
except:f=0
|
|
75
|
+
os.mkdir(c)
|
|
76
|
+
return 0
|
|
90
77
|
""",
|
|
91
78
|
'rmdir': f"""
|
|
92
|
-
def
|
|
93
|
-
for
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
elif
|
|
97
|
-
|
|
98
|
-
os.rmdir(path)
|
|
79
|
+
def _mt_rmdir(p):
|
|
80
|
+
for n,a,_,_ in os.ilistdir(p):
|
|
81
|
+
q=p+'/'+n
|
|
82
|
+
if a=={_ATTR_FILE}:os.remove(q)
|
|
83
|
+
elif a=={_ATTR_DIR}:_mt_rmdir(q)
|
|
84
|
+
os.rmdir(p)
|
|
99
85
|
""",
|
|
100
|
-
'
|
|
101
|
-
def
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
86
|
+
'_hash': """
|
|
87
|
+
def _mt_hash(p):
|
|
88
|
+
h=hashlib.sha256()
|
|
89
|
+
with open(p,'rb')as f:
|
|
90
|
+
while 1:
|
|
91
|
+
c=f.read(512)
|
|
92
|
+
if not c:break
|
|
93
|
+
h.update(c)
|
|
94
|
+
return ubinascii.b2a_base64(h.digest()).strip()
|
|
95
|
+
""",
|
|
96
|
+
'fileinfo': f"""
|
|
97
|
+
def _mt_finfo(files):
|
|
98
|
+
r={{}}
|
|
99
|
+
for p,xsz in files.items():
|
|
100
|
+
try:
|
|
101
|
+
s=os.stat(p)
|
|
102
|
+
if s[0]!={_ATTR_FILE}:r[p]=None;continue
|
|
103
|
+
sz=s[6]
|
|
104
|
+
r[p]=(sz,None)if sz!=xsz else(sz,_mt_hash(p))
|
|
105
|
+
except:r[p]=None
|
|
106
|
+
gc.collect()
|
|
107
|
+
return r
|
|
108
|
+
""",
|
|
109
|
+
'partition_magic': """
|
|
110
|
+
def _mt_pmagic(label, size=512):
|
|
111
|
+
parts = esp32.Partition.find(esp32.Partition.TYPE_DATA, label=label)
|
|
112
|
+
if not parts:
|
|
113
|
+
return None
|
|
114
|
+
p = parts[0]
|
|
115
|
+
buf = bytearray(size)
|
|
116
|
+
p.readblocks(0, buf)
|
|
117
|
+
# Return magic bytes and block size (ioctl 5)
|
|
118
|
+
return bytes(buf), p.ioctl(5, 0)
|
|
119
|
+
""",
|
|
120
|
+
'partition_find': """
|
|
121
|
+
def _mt_pfind(label):
|
|
122
|
+
p = esp32.Partition.find(esp32.Partition.TYPE_APP, label=label)
|
|
123
|
+
if not p:
|
|
124
|
+
p = esp32.Partition.find(esp32.Partition.TYPE_DATA, label=label)
|
|
125
|
+
return p[0] if p else None
|
|
111
126
|
"""}
|
|
112
127
|
|
|
113
|
-
def __init__(self, conn, log=None):
|
|
128
|
+
def __init__(self, conn, log=None, chunk_size=None):
|
|
114
129
|
self._conn = conn
|
|
115
130
|
self._log = log
|
|
116
131
|
self._mpy_comm = _mpy_comm.MpyComm(conn, log=log)
|
|
117
132
|
self._imported = []
|
|
118
133
|
self._load_helpers = []
|
|
134
|
+
self._chunk_size = chunk_size # None = auto-detect
|
|
135
|
+
self._platform = None # Cached platform name
|
|
119
136
|
|
|
120
137
|
@property
|
|
121
138
|
def conn(self):
|
|
@@ -132,10 +149,20 @@ def _mpytool_hashfile(path):
|
|
|
132
149
|
def reset_state(self):
|
|
133
150
|
"""Reset internal state after device reset
|
|
134
151
|
|
|
135
|
-
Call this after soft_reset() to clear cached
|
|
152
|
+
Call this after soft_reset() or hard_reset() to clear cached state.
|
|
136
153
|
"""
|
|
137
154
|
self._imported = []
|
|
138
155
|
self._load_helpers = []
|
|
156
|
+
self._mpy_comm._repl_mode = None
|
|
157
|
+
self._platform = None
|
|
158
|
+
Mpy._CHUNK_AUTO_DETECTED = None
|
|
159
|
+
Mpy._DEFLATE_AVAILABLE = None
|
|
160
|
+
|
|
161
|
+
def _get_platform(self):
|
|
162
|
+
"""Get cached platform name (e.g. 'esp32', 'rp2')"""
|
|
163
|
+
if self._platform is None:
|
|
164
|
+
self._platform = self.platform()['platform']
|
|
165
|
+
return self._platform
|
|
139
166
|
|
|
140
167
|
def load_helper(self, helper):
|
|
141
168
|
"""Load helper function to MicroPython
|
|
@@ -172,7 +199,7 @@ def _mpytool_hashfile(path):
|
|
|
172
199
|
"""
|
|
173
200
|
self.import_module('os')
|
|
174
201
|
self.load_helper('stat')
|
|
175
|
-
return self._mpy_comm.exec_eval(f"
|
|
202
|
+
return self._mpy_comm.exec_eval(f"_mt_stat('{_escape_path(path)}')")
|
|
176
203
|
|
|
177
204
|
def ls(self, path=None):
|
|
178
205
|
"""List files on path
|
|
@@ -224,13 +251,12 @@ def _mpytool_hashfile(path):
|
|
|
224
251
|
if path is None:
|
|
225
252
|
path = ''
|
|
226
253
|
if path in ('', '.', '/'):
|
|
227
|
-
return self._mpy_comm.exec_eval(f"
|
|
228
|
-
# check if path exists
|
|
254
|
+
return self._mpy_comm.exec_eval(f"_mt_tree('{_escape_path(path)}')")
|
|
229
255
|
result = self.stat(path)
|
|
230
256
|
if result is None:
|
|
231
257
|
raise DirNotFound(path)
|
|
232
258
|
if result == -1:
|
|
233
|
-
return self._mpy_comm.exec_eval(f"
|
|
259
|
+
return self._mpy_comm.exec_eval(f"_mt_tree('{_escape_path(path)}')")
|
|
234
260
|
return (path, result, None)
|
|
235
261
|
|
|
236
262
|
def mkdir(self, path):
|
|
@@ -241,7 +267,7 @@ def _mpytool_hashfile(path):
|
|
|
241
267
|
"""
|
|
242
268
|
self.import_module('os')
|
|
243
269
|
self.load_helper('mkdir')
|
|
244
|
-
if self._mpy_comm.exec_eval(f"
|
|
270
|
+
if self._mpy_comm.exec_eval(f"_mt_mkdir('{_escape_path(path)}')"):
|
|
245
271
|
raise _mpy_comm.MpyError(f'Error creating directory, this is file: {path}')
|
|
246
272
|
|
|
247
273
|
def delete(self, path):
|
|
@@ -256,7 +282,7 @@ def _mpytool_hashfile(path):
|
|
|
256
282
|
if result == -1:
|
|
257
283
|
self.import_module('os')
|
|
258
284
|
self.load_helper('rmdir')
|
|
259
|
-
self._mpy_comm.exec(f"
|
|
285
|
+
self._mpy_comm.exec(f"_mt_rmdir('{_escape_path(path)}')", 20)
|
|
260
286
|
else:
|
|
261
287
|
self._mpy_comm.exec(f"os.remove('{_escape_path(path)}')")
|
|
262
288
|
|
|
@@ -270,6 +296,27 @@ def _mpytool_hashfile(path):
|
|
|
270
296
|
self.import_module('os')
|
|
271
297
|
self._mpy_comm.exec(f"os.rename('{_escape_path(src)}', '{_escape_path(dst)}')")
|
|
272
298
|
|
|
299
|
+
def getcwd(self):
|
|
300
|
+
"""Get current working directory
|
|
301
|
+
|
|
302
|
+
Returns:
|
|
303
|
+
current working directory path
|
|
304
|
+
"""
|
|
305
|
+
self.import_module('os')
|
|
306
|
+
return self._mpy_comm.exec_eval("repr(os.getcwd())")
|
|
307
|
+
|
|
308
|
+
def chdir(self, path):
|
|
309
|
+
"""Change current working directory
|
|
310
|
+
|
|
311
|
+
Arguments:
|
|
312
|
+
path: directory path to change to
|
|
313
|
+
"""
|
|
314
|
+
self.import_module('os')
|
|
315
|
+
try:
|
|
316
|
+
self._mpy_comm.exec(f"os.chdir('{_escape_path(path)}')")
|
|
317
|
+
except _mpy_comm.CmdError as err:
|
|
318
|
+
raise DirNotFound(path) from err
|
|
319
|
+
|
|
273
320
|
def hashfile(self, path):
|
|
274
321
|
"""Compute SHA256 hash of file
|
|
275
322
|
|
|
@@ -279,9 +326,41 @@ def _mpytool_hashfile(path):
|
|
|
279
326
|
Returns:
|
|
280
327
|
bytes with SHA256 hash (32 bytes) or None if hashlib not available
|
|
281
328
|
"""
|
|
282
|
-
self.
|
|
329
|
+
self.import_module('hashlib')
|
|
330
|
+
self.import_module('ubinascii')
|
|
331
|
+
self.load_helper('_hash')
|
|
332
|
+
try:
|
|
333
|
+
result = self._mpy_comm.exec_eval(f"_mt_hash('{_escape_path(path)}')")
|
|
334
|
+
return base64.b64decode(result) if result else None
|
|
335
|
+
except _mpy_comm.CmdError:
|
|
336
|
+
return None
|
|
337
|
+
|
|
338
|
+
def fileinfo(self, files):
|
|
339
|
+
"""Get file info (size and hash) for multiple files in one call
|
|
340
|
+
|
|
341
|
+
Arguments:
|
|
342
|
+
files: dict {path: expected_size} - hash is only computed if sizes match
|
|
343
|
+
|
|
344
|
+
Returns:
|
|
345
|
+
dict {path: (size, hash)} - hash is None if sizes don't match
|
|
346
|
+
dict {path: None} - if file doesn't exist
|
|
347
|
+
Returns None if hashlib not available on device
|
|
348
|
+
"""
|
|
349
|
+
self.import_module('os')
|
|
350
|
+
self.import_module('gc')
|
|
351
|
+
self.import_module('hashlib')
|
|
352
|
+
self.import_module('ubinascii')
|
|
353
|
+
self.load_helper('_hash')
|
|
354
|
+
self.load_helper('fileinfo')
|
|
355
|
+
escaped_files = {_escape_path(p): s for p, s in files.items()}
|
|
356
|
+
# Timeout scales with number of files (base 5s + 0.5s per file)
|
|
357
|
+
timeout = 5 + len(files) * 0.5
|
|
283
358
|
try:
|
|
284
|
-
|
|
359
|
+
result = self._mpy_comm.exec_eval(f"_mt_finfo({escaped_files})", timeout=timeout)
|
|
360
|
+
for path, info in result.items():
|
|
361
|
+
if info and info[1]:
|
|
362
|
+
result[path] = (info[0], base64.b64decode(info[1]))
|
|
363
|
+
return result
|
|
285
364
|
except _mpy_comm.CmdError:
|
|
286
365
|
return None
|
|
287
366
|
|
|
@@ -295,7 +374,6 @@ def _mpytool_hashfile(path):
|
|
|
295
374
|
Returns:
|
|
296
375
|
bytes with file content
|
|
297
376
|
"""
|
|
298
|
-
# Get file size first if callback provided
|
|
299
377
|
total_size = 0
|
|
300
378
|
if progress_callback:
|
|
301
379
|
total_size = self.stat(path)
|
|
@@ -316,22 +394,929 @@ def _mpytool_hashfile(path):
|
|
|
316
394
|
self._mpy_comm.exec("f.close()")
|
|
317
395
|
return data
|
|
318
396
|
|
|
319
|
-
def
|
|
397
|
+
def _encode_chunk(self, chunk, compress=False):
|
|
398
|
+
"""Encode chunk for transfer - choose smallest representation
|
|
399
|
+
|
|
400
|
+
Arguments:
|
|
401
|
+
chunk: bytes to encode
|
|
402
|
+
compress: whether to try compression
|
|
403
|
+
|
|
404
|
+
Returns:
|
|
405
|
+
tuple (command_string, original_chunk_size, encoding_type)
|
|
406
|
+
encoding_type is 'raw', 'base64', or 'compressed'
|
|
407
|
+
"""
|
|
408
|
+
chunk_size = len(chunk)
|
|
409
|
+
raw = repr(chunk)
|
|
410
|
+
raw_len = len(raw)
|
|
411
|
+
|
|
412
|
+
b64 = base64.b64encode(chunk).decode('ascii')
|
|
413
|
+
b64_cmd = f"ub.a2b_base64('{b64}')"
|
|
414
|
+
b64_len = len(b64_cmd)
|
|
415
|
+
|
|
416
|
+
best_cmd = raw
|
|
417
|
+
best_len = raw_len
|
|
418
|
+
best_type = 'raw'
|
|
419
|
+
|
|
420
|
+
if b64_len < best_len:
|
|
421
|
+
best_cmd = b64_cmd
|
|
422
|
+
best_len = b64_len
|
|
423
|
+
best_type = 'base64'
|
|
424
|
+
|
|
425
|
+
if compress:
|
|
426
|
+
compressed = zlib.compress(chunk)
|
|
427
|
+
comp_b64 = base64.b64encode(compressed).decode('ascii')
|
|
428
|
+
comp_cmd = f"df.DeflateIO(_io.BytesIO(ub.a2b_base64('{comp_b64}'))).read()"
|
|
429
|
+
comp_len = len(comp_cmd)
|
|
430
|
+
if comp_len < best_len:
|
|
431
|
+
best_cmd = comp_cmd
|
|
432
|
+
best_len = comp_len
|
|
433
|
+
best_type = 'compressed'
|
|
434
|
+
|
|
435
|
+
return best_cmd, chunk_size, best_type
|
|
436
|
+
|
|
437
|
+
def _detect_chunk_size(self):
|
|
438
|
+
"""Detect optimal chunk size based on device free RAM
|
|
439
|
+
|
|
440
|
+
Returns:
|
|
441
|
+
chunk size in bytes (512, 1024, 2048, 4096, 8192, 16384, or 32768)
|
|
442
|
+
"""
|
|
443
|
+
if self._chunk_size is not None:
|
|
444
|
+
return self._chunk_size
|
|
445
|
+
if Mpy._CHUNK_AUTO_DETECTED is not None:
|
|
446
|
+
return Mpy._CHUNK_AUTO_DETECTED
|
|
447
|
+
self.import_module('gc')
|
|
448
|
+
self._mpy_comm.exec("gc.collect()")
|
|
449
|
+
try:
|
|
450
|
+
free = self._mpy_comm.exec_eval("gc.mem_free()")
|
|
451
|
+
except _mpy_comm.CmdError:
|
|
452
|
+
free = 0
|
|
453
|
+
# Select chunk size based on free RAM (~10-15% of free RAM)
|
|
454
|
+
if free > 256 * 1024:
|
|
455
|
+
chunk = 32768
|
|
456
|
+
elif free > 128 * 1024:
|
|
457
|
+
chunk = 16384
|
|
458
|
+
elif free > 64 * 1024:
|
|
459
|
+
chunk = 8192
|
|
460
|
+
elif free > 48 * 1024:
|
|
461
|
+
chunk = 4096
|
|
462
|
+
elif free > 32 * 1024:
|
|
463
|
+
chunk = 2048
|
|
464
|
+
elif free > 24 * 1024:
|
|
465
|
+
chunk = 1024
|
|
466
|
+
else:
|
|
467
|
+
chunk = 512
|
|
468
|
+
Mpy._CHUNK_AUTO_DETECTED = chunk
|
|
469
|
+
return chunk
|
|
470
|
+
|
|
471
|
+
def _detect_deflate(self):
|
|
472
|
+
"""Detect if deflate module is available and device has enough RAM
|
|
473
|
+
|
|
474
|
+
Returns:
|
|
475
|
+
True if deflate is available and RAM >= 64KB, False otherwise
|
|
476
|
+
"""
|
|
477
|
+
if Mpy._DEFLATE_AVAILABLE is None:
|
|
478
|
+
# Check RAM first - need at least 64KB for decompression
|
|
479
|
+
chunk = self._detect_chunk_size()
|
|
480
|
+
if chunk < 8192: # chunk < 8K means RAM <= 64KB
|
|
481
|
+
Mpy._DEFLATE_AVAILABLE = False
|
|
482
|
+
else:
|
|
483
|
+
try:
|
|
484
|
+
self._mpy_comm.exec("import deflate")
|
|
485
|
+
Mpy._DEFLATE_AVAILABLE = True
|
|
486
|
+
except _mpy_comm.CmdError:
|
|
487
|
+
Mpy._DEFLATE_AVAILABLE = False
|
|
488
|
+
return Mpy._DEFLATE_AVAILABLE
|
|
489
|
+
|
|
490
|
+
def put(self, data, path, progress_callback=None, compress=None):
|
|
320
491
|
"""Write file to device
|
|
321
492
|
|
|
322
493
|
Arguments:
|
|
323
494
|
data: bytes with file content
|
|
324
495
|
path: file path to write
|
|
325
496
|
progress_callback: optional callback(transferred, total) for progress
|
|
497
|
+
compress: None=auto-detect, True=force compression, False=disable
|
|
498
|
+
|
|
499
|
+
Returns:
|
|
500
|
+
tuple (encodings_used, wire_bytes) where:
|
|
501
|
+
encodings_used: set of encoding types ('raw', 'base64', 'compressed')
|
|
502
|
+
wire_bytes: number of bytes sent over the wire (encoded size)
|
|
326
503
|
"""
|
|
504
|
+
chunk_size = self._detect_chunk_size()
|
|
327
505
|
total_size = len(data)
|
|
328
506
|
transferred = 0
|
|
507
|
+
wire_bytes = 0
|
|
508
|
+
encodings_used = set()
|
|
509
|
+
|
|
510
|
+
if compress is None:
|
|
511
|
+
compress = self._detect_deflate()
|
|
512
|
+
|
|
513
|
+
self.import_module('ubinascii as ub')
|
|
514
|
+
if compress:
|
|
515
|
+
self.import_module('deflate as df')
|
|
516
|
+
self.import_module('io as _io')
|
|
517
|
+
|
|
329
518
|
self._mpy_comm.exec(f"f = open('{_escape_path(path)}', 'wb')")
|
|
330
519
|
while data:
|
|
331
|
-
chunk = data[:
|
|
332
|
-
|
|
333
|
-
|
|
334
|
-
|
|
520
|
+
chunk = data[:chunk_size]
|
|
521
|
+
cmd, orig_size, enc_type = self._encode_chunk(chunk, compress)
|
|
522
|
+
encodings_used.add(enc_type)
|
|
523
|
+
# Wire bytes = command overhead (9 = "f.write(" + ")") + encoded data
|
|
524
|
+
wire_bytes += 9 + len(cmd)
|
|
525
|
+
count = self._mpy_comm.exec_eval(f"f.write({cmd})", timeout=10)
|
|
526
|
+
data = data[orig_size:]
|
|
527
|
+
transferred += orig_size
|
|
335
528
|
if progress_callback:
|
|
336
529
|
progress_callback(transferred, total_size)
|
|
337
530
|
self._mpy_comm.exec("f.close()")
|
|
531
|
+
# Run garbage collection to free memory and allow flash to settle
|
|
532
|
+
self.import_module('gc')
|
|
533
|
+
self._mpy_comm.exec("gc.collect()")
|
|
534
|
+
return encodings_used, wire_bytes
|
|
535
|
+
|
|
536
|
+
def platform(self):
|
|
537
|
+
"""Get platform information
|
|
538
|
+
|
|
539
|
+
Returns:
|
|
540
|
+
dict with keys:
|
|
541
|
+
'platform': platform name (e.g. 'esp32')
|
|
542
|
+
'version': MicroPython version string
|
|
543
|
+
'impl': implementation name (e.g. 'micropython')
|
|
544
|
+
'machine': machine description (or None)
|
|
545
|
+
"""
|
|
546
|
+
self.import_module('sys')
|
|
547
|
+
self.import_module('os')
|
|
548
|
+
|
|
549
|
+
platform = self._mpy_comm.exec_eval("repr(sys.platform)")
|
|
550
|
+
version = self._mpy_comm.exec_eval("repr(sys.version)")
|
|
551
|
+
impl = self._mpy_comm.exec_eval("repr(sys.implementation.name)")
|
|
552
|
+
|
|
553
|
+
try:
|
|
554
|
+
uname = self._mpy_comm.exec_eval("tuple(os.uname())")
|
|
555
|
+
machine = uname[4] if len(uname) > 4 else None
|
|
556
|
+
except _mpy_comm.CmdError:
|
|
557
|
+
machine = None
|
|
558
|
+
|
|
559
|
+
return {
|
|
560
|
+
'platform': platform,
|
|
561
|
+
'version': version,
|
|
562
|
+
'impl': impl,
|
|
563
|
+
'machine': machine,
|
|
564
|
+
}
|
|
565
|
+
|
|
566
|
+
def memory(self):
|
|
567
|
+
"""Get memory (RAM) information
|
|
568
|
+
|
|
569
|
+
Returns:
|
|
570
|
+
dict with keys:
|
|
571
|
+
'free': free RAM in bytes
|
|
572
|
+
'alloc': allocated RAM in bytes
|
|
573
|
+
'total': total RAM in bytes
|
|
574
|
+
"""
|
|
575
|
+
self.import_module('gc')
|
|
576
|
+
self._mpy_comm.exec("gc.collect()")
|
|
577
|
+
|
|
578
|
+
mem_free = self._mpy_comm.exec_eval("gc.mem_free()")
|
|
579
|
+
mem_alloc = self._mpy_comm.exec_eval("gc.mem_alloc()")
|
|
580
|
+
|
|
581
|
+
return {
|
|
582
|
+
'free': mem_free,
|
|
583
|
+
'alloc': mem_alloc,
|
|
584
|
+
'total': mem_free + mem_alloc,
|
|
585
|
+
}
|
|
586
|
+
|
|
587
|
+
def unique_id(self):
|
|
588
|
+
"""Get device unique ID (serial number)
|
|
589
|
+
|
|
590
|
+
Returns:
|
|
591
|
+
hex string or None if not available
|
|
592
|
+
"""
|
|
593
|
+
try:
|
|
594
|
+
return self._mpy_comm.exec_eval(
|
|
595
|
+
"repr(__import__('machine').unique_id().hex())"
|
|
596
|
+
)
|
|
597
|
+
except _mpy_comm.CmdError:
|
|
598
|
+
return None
|
|
599
|
+
|
|
600
|
+
def mac_addresses(self):
|
|
601
|
+
"""Get network MAC addresses
|
|
602
|
+
|
|
603
|
+
Returns:
|
|
604
|
+
list of (interface_name, mac_address) tuples
|
|
605
|
+
"""
|
|
606
|
+
addresses = []
|
|
607
|
+
try:
|
|
608
|
+
self.import_module('network')
|
|
609
|
+
try:
|
|
610
|
+
mac = self._mpy_comm.exec_eval(
|
|
611
|
+
"repr(network.WLAN(network.STA_IF).config('mac').hex(':'))"
|
|
612
|
+
)
|
|
613
|
+
addresses.append(('WiFi', mac))
|
|
614
|
+
except _mpy_comm.CmdError:
|
|
615
|
+
pass
|
|
616
|
+
try:
|
|
617
|
+
mac = self._mpy_comm.exec_eval(
|
|
618
|
+
"repr(network.WLAN(network.AP_IF).config('mac').hex(':'))"
|
|
619
|
+
)
|
|
620
|
+
if not addresses or mac != addresses[0][1]:
|
|
621
|
+
addresses.append(('WiFi AP', mac))
|
|
622
|
+
except _mpy_comm.CmdError:
|
|
623
|
+
pass
|
|
624
|
+
try:
|
|
625
|
+
mac = self._mpy_comm.exec_eval(
|
|
626
|
+
"repr(network.LAN().config('mac').hex(':'))"
|
|
627
|
+
)
|
|
628
|
+
addresses.append(('LAN', mac))
|
|
629
|
+
except _mpy_comm.CmdError:
|
|
630
|
+
pass
|
|
631
|
+
except _mpy_comm.CmdError:
|
|
632
|
+
pass
|
|
633
|
+
return addresses
|
|
634
|
+
|
|
635
|
+
def filesystems(self):
|
|
636
|
+
"""Get filesystem information
|
|
637
|
+
|
|
638
|
+
Returns:
|
|
639
|
+
list of dicts with keys: mount, total, free, used
|
|
640
|
+
"""
|
|
641
|
+
self.import_module('os')
|
|
642
|
+
result = []
|
|
643
|
+
|
|
644
|
+
try:
|
|
645
|
+
fs_stat = self._mpy_comm.exec_eval("os.statvfs('/')")
|
|
646
|
+
fs_total = fs_stat[0] * fs_stat[2]
|
|
647
|
+
fs_free = fs_stat[0] * fs_stat[3]
|
|
648
|
+
if fs_total > 0:
|
|
649
|
+
result.append({
|
|
650
|
+
'mount': '/',
|
|
651
|
+
'total': fs_total,
|
|
652
|
+
'free': fs_free,
|
|
653
|
+
'used': fs_total - fs_free,
|
|
654
|
+
})
|
|
655
|
+
except _mpy_comm.CmdError:
|
|
656
|
+
pass
|
|
657
|
+
|
|
658
|
+
# Check subdirectories for additional mount points
|
|
659
|
+
try:
|
|
660
|
+
root_dirs = self._mpy_comm.exec_eval(
|
|
661
|
+
"[d[0] for d in os.ilistdir('/') if d[1] == 0x4000]"
|
|
662
|
+
)
|
|
663
|
+
for dirname in root_dirs:
|
|
664
|
+
try:
|
|
665
|
+
path = '/' + dirname
|
|
666
|
+
sub_stat = self._mpy_comm.exec_eval(f"os.statvfs('{path}')")
|
|
667
|
+
sub_total = sub_stat[0] * sub_stat[2]
|
|
668
|
+
sub_free = sub_stat[0] * sub_stat[3]
|
|
669
|
+
# Skip if same as root or zero size
|
|
670
|
+
if sub_total == 0 or any(f['total'] == sub_total for f in result):
|
|
671
|
+
continue
|
|
672
|
+
result.append({
|
|
673
|
+
'mount': path,
|
|
674
|
+
'total': sub_total,
|
|
675
|
+
'free': sub_free,
|
|
676
|
+
'used': sub_total - sub_free,
|
|
677
|
+
})
|
|
678
|
+
except _mpy_comm.CmdError:
|
|
679
|
+
pass
|
|
680
|
+
except _mpy_comm.CmdError:
|
|
681
|
+
pass
|
|
682
|
+
|
|
683
|
+
return result
|
|
684
|
+
|
|
685
|
+
def info(self):
|
|
686
|
+
"""Get all device information (convenience method)
|
|
687
|
+
|
|
688
|
+
Returns:
|
|
689
|
+
dict combining platform(), memory(), unique_id(),
|
|
690
|
+
mac_addresses() and filesystems()
|
|
691
|
+
"""
|
|
692
|
+
result = self.platform()
|
|
693
|
+
result['unique_id'] = self.unique_id()
|
|
694
|
+
result['mac_addresses'] = self.mac_addresses()
|
|
695
|
+
result.update({f'mem_{k}': v for k, v in self.memory().items()})
|
|
696
|
+
result['filesystems'] = self.filesystems()
|
|
697
|
+
return result
|
|
698
|
+
|
|
699
|
+
_PART_TYPES = {0: 'app', 1: 'data'}
|
|
700
|
+
_PART_SUBTYPES = {
|
|
701
|
+
# App subtypes (type 0)
|
|
702
|
+
0: {0: 'factory', 16: 'ota_0', 17: 'ota_1', 18: 'ota_2', 19: 'ota_3', 32: 'test'},
|
|
703
|
+
# Data subtypes (type 1)
|
|
704
|
+
1: {0: 'ota', 1: 'phy', 2: 'nvs', 3: 'coredump', 4: 'nvs_keys',
|
|
705
|
+
5: 'efuse', 128: 'esphttpd', 129: 'fat', 130: 'spiffs', 131: 'littlefs'},
|
|
706
|
+
}
|
|
707
|
+
# Subtypes that can contain a filesystem (for auto-detection)
|
|
708
|
+
_FS_SUBTYPES = {129, 130, 131} # fat, spiffs, littlefs
|
|
709
|
+
|
|
710
|
+
def _detect_fs_from_magic(self, magic):
|
|
711
|
+
"""Detect filesystem type and details from magic bytes.
|
|
712
|
+
|
|
713
|
+
Args:
|
|
714
|
+
magic: First 512 bytes from partition/flash (boot sector)
|
|
715
|
+
|
|
716
|
+
Returns:
|
|
717
|
+
dict with keys:
|
|
718
|
+
'type': filesystem type ('littlefs2', 'fat16', 'fat32', 'exfat', None)
|
|
719
|
+
'block_size': block/cluster size in bytes (if detected)
|
|
720
|
+
'label': volume label (if detected)
|
|
721
|
+
or None if not enough data
|
|
722
|
+
"""
|
|
723
|
+
if len(magic) < 16:
|
|
724
|
+
return None
|
|
725
|
+
|
|
726
|
+
result = {'type': None, 'block_size': None, 'label': None}
|
|
727
|
+
|
|
728
|
+
# LittleFS v2: "littlefs" string at offset 8
|
|
729
|
+
# Note: LittleFS uses inline metadata format, block_size is not at fixed offset
|
|
730
|
+
# We detect the filesystem type but can't reliably get block_size from magic
|
|
731
|
+
if magic[8:16] == b'littlefs':
|
|
732
|
+
result['type'] = 'littlefs2'
|
|
733
|
+
# Block size must be obtained from device (ioctl) or partition info
|
|
734
|
+
return result
|
|
735
|
+
|
|
736
|
+
# Check for FAT boot sector signature (need 512 bytes)
|
|
737
|
+
if len(magic) >= 512:
|
|
738
|
+
# Boot sector signature at 510-511
|
|
739
|
+
if magic[510:512] == b'\x55\xAA':
|
|
740
|
+
import struct
|
|
741
|
+
# Bytes per sector (offset 11-12)
|
|
742
|
+
bytes_per_sector = struct.unpack('<H', magic[11:13])[0]
|
|
743
|
+
# Sectors per cluster (offset 13)
|
|
744
|
+
sectors_per_cluster = magic[13]
|
|
745
|
+
result['block_size'] = bytes_per_sector * sectors_per_cluster
|
|
746
|
+
|
|
747
|
+
# Check for exFAT first (has "EXFAT " at offset 3)
|
|
748
|
+
if magic[3:11] == b'EXFAT ':
|
|
749
|
+
result['type'] = 'exfat'
|
|
750
|
+
return result
|
|
751
|
+
|
|
752
|
+
# FAT type string location differs between FAT16 and FAT32
|
|
753
|
+
# FAT16: "FAT16 " at offset 54
|
|
754
|
+
# FAT32: "FAT32 " at offset 82
|
|
755
|
+
if magic[54:62] == b'FAT16 ':
|
|
756
|
+
result['type'] = 'fat16'
|
|
757
|
+
# Volume label at offset 43 (11 bytes)
|
|
758
|
+
label = magic[43:54].rstrip(b' \x00').decode('ascii', errors='ignore')
|
|
759
|
+
if label and label != 'NO NAME':
|
|
760
|
+
result['label'] = label
|
|
761
|
+
elif magic[82:90] == b'FAT32 ':
|
|
762
|
+
result['type'] = 'fat32'
|
|
763
|
+
# Volume label at offset 71 (11 bytes)
|
|
764
|
+
label = magic[71:82].rstrip(b' \x00').decode('ascii', errors='ignore')
|
|
765
|
+
if label and label != 'NO NAME':
|
|
766
|
+
result['label'] = label
|
|
767
|
+
elif magic[54:59] == b'FAT12':
|
|
768
|
+
result['type'] = 'fat12'
|
|
769
|
+
else:
|
|
770
|
+
# Generic FAT (can't determine type)
|
|
771
|
+
result['type'] = 'fat'
|
|
772
|
+
return result
|
|
773
|
+
|
|
774
|
+
return result if result['type'] else None
|
|
775
|
+
|
|
776
|
+
def _read_partition_magic(self, label, size=512):
|
|
777
|
+
"""Read first bytes from partition for filesystem detection.
|
|
778
|
+
|
|
779
|
+
Args:
|
|
780
|
+
label: Partition label
|
|
781
|
+
size: Number of bytes to read
|
|
782
|
+
|
|
783
|
+
Returns:
|
|
784
|
+
tuple (magic_bytes, block_size) or None if read fails
|
|
785
|
+
"""
|
|
786
|
+
try:
|
|
787
|
+
self.load_helper('partition_magic')
|
|
788
|
+
return self._mpy_comm.exec_eval(f"_mt_pmagic('{label}', {size})")
|
|
789
|
+
except _mpy_comm.CmdError:
|
|
790
|
+
return None
|
|
791
|
+
|
|
792
|
+
def partitions(self):
|
|
793
|
+
"""Get ESP32 partition information
|
|
794
|
+
|
|
795
|
+
Returns:
|
|
796
|
+
dict with keys:
|
|
797
|
+
'partitions': list of partition info dicts with keys:
|
|
798
|
+
label, type, type_name, subtype, subtype_name,
|
|
799
|
+
offset, size, encrypted, running
|
|
800
|
+
'running': label of currently running partition
|
|
801
|
+
'boot': label of boot partition
|
|
802
|
+
'next_ota': label of next OTA partition (or None)
|
|
803
|
+
'next_ota_size': size of next OTA partition (or None)
|
|
804
|
+
|
|
805
|
+
Raises:
|
|
806
|
+
MpyError: if not ESP32 or partition module not available
|
|
807
|
+
"""
|
|
808
|
+
try:
|
|
809
|
+
self.import_module('esp32')
|
|
810
|
+
except _mpy_comm.CmdError:
|
|
811
|
+
raise _mpy_comm.MpyError("Partition info not available (ESP32 only)")
|
|
812
|
+
|
|
813
|
+
running = self._mpy_comm.exec_eval(
|
|
814
|
+
"repr(esp32.Partition(esp32.Partition.RUNNING).info()[4])"
|
|
815
|
+
)
|
|
816
|
+
|
|
817
|
+
raw_parts = self._mpy_comm.exec_eval(
|
|
818
|
+
"[p.info() for p in "
|
|
819
|
+
"esp32.Partition.find(esp32.Partition.TYPE_APP) + "
|
|
820
|
+
"esp32.Partition.find(esp32.Partition.TYPE_DATA)]"
|
|
821
|
+
)
|
|
822
|
+
|
|
823
|
+
partitions = []
|
|
824
|
+
next_ota_size = None
|
|
825
|
+
for ptype, subtype, offset, size, label, encrypted in raw_parts:
|
|
826
|
+
type_name = self._PART_TYPES.get(ptype, str(ptype))
|
|
827
|
+
subtype_name = self._PART_SUBTYPES.get(ptype, {}).get(subtype, str(subtype))
|
|
828
|
+
part_info = {
|
|
829
|
+
'label': label,
|
|
830
|
+
'type': ptype,
|
|
831
|
+
'type_name': type_name,
|
|
832
|
+
'subtype': subtype,
|
|
833
|
+
'subtype_name': subtype_name,
|
|
834
|
+
'offset': offset,
|
|
835
|
+
'size': size,
|
|
836
|
+
'encrypted': encrypted,
|
|
837
|
+
'running': label == running,
|
|
838
|
+
'filesystem': None,
|
|
839
|
+
'fs_block_size': None,
|
|
840
|
+
}
|
|
841
|
+
# Detect actual filesystem for data partitions with FS subtypes
|
|
842
|
+
if ptype == 1 and subtype in self._FS_SUBTYPES: # TYPE_DATA
|
|
843
|
+
result = self._read_partition_magic(label)
|
|
844
|
+
if result:
|
|
845
|
+
magic, block_size = result
|
|
846
|
+
part_info['fs_block_size'] = block_size
|
|
847
|
+
fs_info = self._detect_fs_from_magic(magic)
|
|
848
|
+
if fs_info:
|
|
849
|
+
part_info['filesystem'] = fs_info.get('type')
|
|
850
|
+
# For FAT, use cluster size from magic; for others use partition block size
|
|
851
|
+
if fs_info.get('block_size') and 'fat' in (fs_info.get('type') or ''):
|
|
852
|
+
part_info['fs_cluster_size'] = fs_info.get('block_size')
|
|
853
|
+
partitions.append(part_info)
|
|
854
|
+
|
|
855
|
+
try:
|
|
856
|
+
boot = self._mpy_comm.exec_eval(
|
|
857
|
+
"repr(esp32.Partition(esp32.Partition.BOOT).info()[4])"
|
|
858
|
+
)
|
|
859
|
+
except _mpy_comm.CmdError:
|
|
860
|
+
boot = None
|
|
861
|
+
|
|
862
|
+
# Get next OTA partition (get size and label separately to handle string eval)
|
|
863
|
+
try:
|
|
864
|
+
next_ota_size = self._mpy_comm.exec_eval(
|
|
865
|
+
"esp32.Partition(esp32.Partition.RUNNING).get_next_update().info()[3]"
|
|
866
|
+
)
|
|
867
|
+
next_ota = self._mpy_comm.exec_eval(
|
|
868
|
+
"repr(esp32.Partition(esp32.Partition.RUNNING).get_next_update().info()[4])"
|
|
869
|
+
)
|
|
870
|
+
except _mpy_comm.CmdError:
|
|
871
|
+
next_ota = None
|
|
872
|
+
next_ota_size = None
|
|
873
|
+
|
|
874
|
+
return {
|
|
875
|
+
'partitions': partitions,
|
|
876
|
+
'running': running,
|
|
877
|
+
'boot': boot,
|
|
878
|
+
'next_ota': next_ota,
|
|
879
|
+
'next_ota_size': next_ota_size,
|
|
880
|
+
}
|
|
881
|
+
|
|
882
|
+
def flash_info(self):
|
|
883
|
+
"""Get RP2 flash information
|
|
884
|
+
|
|
885
|
+
Returns:
|
|
886
|
+
dict with keys:
|
|
887
|
+
'size': total flash size in bytes
|
|
888
|
+
'block_size': block size in bytes
|
|
889
|
+
'block_count': number of blocks
|
|
890
|
+
'filesystem': detected filesystem type ('littlefs2', 'fat', 'unknown')
|
|
891
|
+
|
|
892
|
+
Raises:
|
|
893
|
+
MpyError: if not RP2 or rp2.Flash not available
|
|
894
|
+
"""
|
|
895
|
+
try:
|
|
896
|
+
self.import_module('rp2')
|
|
897
|
+
except _mpy_comm.CmdError as err:
|
|
898
|
+
raise _mpy_comm.MpyError("Flash info not available (RP2 only)") from err
|
|
899
|
+
|
|
900
|
+
# Get flash info via ioctl
|
|
901
|
+
# ioctl(4) = block count, ioctl(5) = block size
|
|
902
|
+
self._mpy_comm.exec("_f = rp2.Flash()")
|
|
903
|
+
info = self._mpy_comm.exec_eval("(_f.ioctl(4, 0), _f.ioctl(5, 0))")
|
|
904
|
+
block_count, block_size = info
|
|
905
|
+
size = block_count * block_size
|
|
906
|
+
|
|
907
|
+
# Read first 512 bytes for filesystem detection
|
|
908
|
+
self._mpy_comm.exec("_b = bytearray(512); _f.readblocks(0, _b)")
|
|
909
|
+
magic = self._mpy_comm.exec_eval("bytes(_b)")
|
|
910
|
+
|
|
911
|
+
# Use common filesystem detection
|
|
912
|
+
fs_info = self._detect_fs_from_magic(magic)
|
|
913
|
+
fs_type = fs_info.get('type') if fs_info else None
|
|
914
|
+
fs_block_size = fs_info.get('block_size') if fs_info else None
|
|
915
|
+
|
|
916
|
+
return {
|
|
917
|
+
'size': size,
|
|
918
|
+
'block_size': block_size,
|
|
919
|
+
'block_count': block_count,
|
|
920
|
+
'filesystem': fs_type or 'unknown',
|
|
921
|
+
'fs_block_size': fs_block_size,
|
|
922
|
+
'magic': magic[:16],
|
|
923
|
+
}
|
|
924
|
+
|
|
925
|
+
def flash_read(self, label=None, progress_callback=None):
    """Read flash/partition content

    Arguments:
        label: partition label (ESP32) or None (RP2 entire user flash)
        progress_callback: optional callback(transferred, total)

    Returns:
        bytes with flash/partition content

    Raises:
        MpyError: if wrong platform or partition not found
    """
    platform = self._get_platform()
    self.import_module('ubinascii as ub')

    if label:
        # ESP32 partition
        if platform != 'esp32':
            raise _mpy_comm.MpyError("Partition label requires ESP32")
        self.import_module('esp32')
        self.load_helper('partition_find')
        try:
            part_info = self._mpy_comm.exec_eval(f"_mt_pfind('{label}').info()")
        except _mpy_comm.CmdError as err:
            # chain the original error for debuggability (consistent with flash_info)
            raise _mpy_comm.MpyError(f"Partition '{label}' not found") from err
        _, _, _, total_size, _, _ = part_info
        block_size = 4096  # esp32.Partition block size
        self._mpy_comm.exec(f"_dev = _mt_pfind('{label}')")
    else:
        # RP2 flash
        if platform != 'rp2':
            raise _mpy_comm.MpyError("Flash read without label requires RP2")
        self.import_module('rp2')
        self._mpy_comm.exec("_dev = rp2.Flash()")
        info = self._mpy_comm.exec_eval("(_dev.ioctl(4, 0), _dev.ioctl(5, 0))")
        block_count, block_size = info
        total_size = block_count * block_size

    total_blocks = (total_size + block_size - 1) // block_size
    chunk_blocks = 8  # 32KB per iteration
    data = bytearray()
    block_num = 0

    while block_num < total_blocks:
        blocks_to_read = min(chunk_blocks, total_blocks - block_num)
        bytes_to_read = blocks_to_read * block_size
        self._mpy_comm.exec(
            f"_buf=bytearray({bytes_to_read}); _dev.readblocks({block_num}, _buf)")
        # transfer the buffer as base64 text over the REPL
        b64_data = self._mpy_comm.exec_eval("repr(ub.b2a_base64(_buf).decode())")
        chunk = base64.b64decode(b64_data)
        data.extend(chunk)
        block_num += blocks_to_read
        if progress_callback:
            progress_callback(min(block_num * block_size, total_size), total_size)

    self._mpy_comm.exec("del _dev")
    return bytes(data[:total_size])
def flash_write(self, data, label=None, progress_callback=None, compress=None):
    """Write data to flash/partition

    WARNING: This will overwrite the filesystem! Use with caution.

    Arguments:
        data: bytes to write (will be padded to block size)
        label: partition label (ESP32) or None (RP2 entire user flash)
        progress_callback: optional callback(transferred, total) for RP2,
                           callback(transferred, total, wire_bytes) for ESP32
        compress: None=auto-detect, True=force, False=disable (ESP32 only)

    Returns:
        dict with keys: 'size', 'written', and for ESP32: 'wire_bytes', 'compressed'

    Raises:
        MpyError: if wrong platform, data too large, or partition not found
    """
    platform = self._get_platform()

    if label:
        # ESP32 partition - use _write_partition_data for compression support
        if platform != 'esp32':
            raise _mpy_comm.MpyError("Partition label requires ESP32")
        self.import_module('esp32')
        self.load_helper('partition_find')
        try:
            part_info = self._mpy_comm.exec_eval(f"_mt_pfind('{label}').info()")
        except _mpy_comm.CmdError as err:
            # chain the original error for debuggability (consistent with flash_info)
            raise _mpy_comm.MpyError(f"Partition '{label}' not found") from err
        _, _, _, part_size, _, _ = part_info
        self._mpy_comm.exec(f"_dev = _mt_pfind('{label}')")

        wire_bytes, used_compress = self._write_partition_data(
            '_dev', data, len(data), part_size, progress_callback, compress)

        self._mpy_comm.exec("del _dev")
        self.import_module('gc')
        self._mpy_comm.exec("gc.collect()")

        return {
            'size': part_size,
            'written': len(data),
            'wire_bytes': wire_bytes,
            'compressed': used_compress,
        }
    else:
        # RP2 flash - simple block write
        if platform != 'rp2':
            raise _mpy_comm.MpyError("Flash write without label requires RP2")
        self.import_module('rp2')
        self._mpy_comm.exec("_dev = rp2.Flash()")
        info = self._mpy_comm.exec_eval("(_dev.ioctl(4, 0), _dev.ioctl(5, 0))")
        block_count, block_size = info
        total_size = block_count * block_size

        if len(data) > total_size:
            raise _mpy_comm.MpyError(
                f"Data too large: {len(data)} bytes, flash size: {total_size} bytes")

        self.import_module('ubinascii as ub')

        # Pad data to block size
        if len(data) % block_size:
            padding = block_size - (len(data) % block_size)
            data = data + b'\xff' * padding

        chunk_blocks = 8  # 32KB per iteration
        block_num = 0
        total_blocks = len(data) // block_size

        while block_num < total_blocks:
            blocks_to_write = min(chunk_blocks, total_blocks - block_num)
            offset = block_num * block_size
            chunk = data[offset:offset + blocks_to_write * block_size]
            # transfer chunk as base64 text over the REPL
            b64_chunk = base64.b64encode(chunk).decode('ascii')
            self._mpy_comm.exec(f"_buf=ub.a2b_base64('{b64_chunk}')")
            self._mpy_comm.exec(f"_dev.writeblocks({block_num}, _buf)")
            block_num += blocks_to_write
            if progress_callback:
                progress_callback(block_num * block_size, len(data))

        self._mpy_comm.exec("del _dev")
        return {
            'size': total_size,
            'written': len(data),
        }
def flash_erase(self, label=None, full=False, progress_callback=None):
    """Erase flash/partition

    "Erase" is implemented by overwriting blocks with 0xff bytes.

    Arguments:
        label: partition label (ESP32) or None (RP2 entire user flash)
        full: if True, erase entire flash/partition; if False, erase first 2 blocks
        progress_callback: optional callback(transferred, total)

    Returns:
        dict with keys: 'erased', and 'label' for ESP32

    Raises:
        MpyError: if wrong platform or partition not found
    """
    platform = self._get_platform()

    if label:
        # ESP32 partition
        if platform != 'esp32':
            raise _mpy_comm.MpyError("Partition label requires ESP32")
        self.import_module('esp32')
        self.load_helper('partition_find')
        try:
            part_info = self._mpy_comm.exec_eval(f"_mt_pfind('{label}').info()")
        except _mpy_comm.CmdError as err:
            # chain the original error for debuggability (consistent with flash_info)
            raise _mpy_comm.MpyError(f"Partition '{label}' not found") from err
        _, _, _, part_size, _, _ = part_info
        block_size = 4096
        total_blocks = part_size // block_size
        self._mpy_comm.exec(f"_dev = _mt_pfind('{label}')")
    else:
        # RP2 flash
        if platform != 'rp2':
            raise _mpy_comm.MpyError("Flash erase without label requires RP2")
        self.import_module('rp2')
        self._mpy_comm.exec("_dev = rp2.Flash()")
        info = self._mpy_comm.exec_eval("(_dev.ioctl(4, 0), _dev.ioctl(5, 0))")
        total_blocks, block_size = info

    if full:
        blocks_to_erase = total_blocks
    else:
        blocks_to_erase = min(2, total_blocks)  # First 2 blocks for FS reset

    total_bytes = blocks_to_erase * block_size

    # Prepare empty block buffer on device
    self._mpy_comm.exec(f"_buf = b'\\xff' * {block_size}")

    for block_num in range(blocks_to_erase):
        self._mpy_comm.exec(f"_dev.writeblocks({block_num}, _buf)")
        if progress_callback:
            progress_callback((block_num + 1) * block_size, total_bytes)

    # free both device-side temporaries, not just the device handle
    self._mpy_comm.exec("del _dev, _buf")

    result = {'erased': total_bytes}
    if label:
        result['label'] = label
    return result
def soft_reset(self):
    """Perform a soft reset (equivalent to Ctrl-D in the REPL).

    The interpreter restarts and executes boot.py and main.py, so
    cached device-side state tracked by this object is invalidated.
    """
    self._mpy_comm.soft_reset()
    self.reset_state()
def soft_reset_raw(self):
    """Perform a soft reset while staying in raw REPL mode.

    RAM is cleared, but boot.py/main.py are not executed. Cached
    device-side state tracked by this object is invalidated.
    """
    self._mpy_comm.soft_reset_raw()
    self.reset_state()
def machine_reset(self, reconnect=True, timeout=None):
    """Reset the MCU via machine.reset().

    Arguments:
        reconnect: when True, try to re-open the connection afterwards
        timeout: reconnect timeout in seconds (None = default)

    Returns:
        True when a reconnect was attempted and completed, False when
        reconnect was disabled

    Note: For USB-CDC ports, the port may disappear and reappear.
    """
    self._mpy_comm.enter_raw_repl()
    self._conn.write(b"import machine; machine.reset()\x04")
    self.reset_state()
    if not reconnect:
        return False
    self._conn.reconnect(timeout=timeout)
    return True
def machine_bootloader(self):
    """Jump into the bootloader via machine.bootloader().

    The command is pushed through raw REPL; the serial connection is
    lost once the device enters its bootloader, and any cached
    device-side state is invalidated.
    """
    self._mpy_comm.enter_raw_repl()
    self._conn.write(b"import machine; machine.bootloader()\x04")
    self.reset_state()
def hard_reset(self):
    """Reset the board in hardware by toggling the RTS line (serial only).

    Invalidates cached device-side state afterwards.

    Raises:
        NotImplementedError: if the connection doesn't support hardware reset
    """
    self._conn.hard_reset()
    self.reset_state()
def reset_to_bootloader(self):
    """Put the board into its bootloader via DTR/RTS signalling (ESP32 serial only).

    Invalidates cached device-side state afterwards.

    Raises:
        NotImplementedError: if the connection doesn't support this
    """
    self._conn.reset_to_bootloader()
    self.reset_state()
def _write_partition_data(
        self, part_var, data, data_size, part_size,
        progress_callback=None, compress=None):
    """Write data to partition (shared implementation)

    Transfers the data chunk-wise as base64 text over the REPL,
    optionally deflate-compressed when the device supports it and the
    compressed payload is actually smaller.

    Arguments:
        part_var: variable name holding partition on device (e.g. '_part')
        data: bytes to write
        data_size: size of data
        part_size: partition size (for validation)
        progress_callback: optional callback(transferred, total, wire_bytes)
        compress: None=auto-detect, True=force, False=disable

    Returns:
        tuple: (wire_bytes, used_compress)
        wire_bytes counts base64 payload characters sent over the wire.

    Raises:
        MpyError: if data does not fit into the partition
    """
    if data_size > part_size:
        raise _mpy_comm.MpyError(
            f"Data too large: {data_size} > {part_size} bytes"
        )

    if compress is None:
        compress = self._detect_deflate()

    flash_block = 4096
    chunk_size = self._detect_chunk_size()
    # round chunk size down to a whole number of flash blocks
    chunk_size = max(flash_block, (chunk_size // flash_block) * flash_block)

    self.import_module('ubinascii as ub')
    if compress:
        self.import_module('deflate as df')
        self.import_module('io as _io')

    block_num = 0
    offset = 0
    wire_bytes = 0
    used_compress = False

    while offset < data_size:
        chunk = data[offset:offset + chunk_size]
        chunk_len = len(chunk)

        # Pad last chunk to flash block size
        if chunk_len % flash_block:
            padding = flash_block - (chunk_len % flash_block)
            chunk = chunk + b'\xff' * padding

        if compress:
            compressed = zlib.compress(chunk)
            comp_b64 = base64.b64encode(compressed).decode('ascii')
            # base64 length is exactly 4*ceil(n/3); compute it instead of
            # encoding the raw chunk just to measure it
            raw_b64_len = 4 * ((len(chunk) + 2) // 3)
            # only pay the device-side decompression cost when it saves
            # a meaningful amount of wire traffic
            if len(comp_b64) < raw_b64_len - 20:
                cmd = f"{part_var}.writeblocks({block_num}, df.DeflateIO(_io.BytesIO(ub.a2b_base64('{comp_b64}'))).read())"
                wire_bytes += len(comp_b64)
                used_compress = True
            else:
                raw_b64 = base64.b64encode(chunk).decode('ascii')
                cmd = f"{part_var}.writeblocks({block_num}, ub.a2b_base64('{raw_b64}'))"
                wire_bytes += len(raw_b64)
        else:
            raw_b64 = base64.b64encode(chunk).decode('ascii')
            cmd = f"{part_var}.writeblocks({block_num}, ub.a2b_base64('{raw_b64}'))"
            wire_bytes += len(raw_b64)

        self._mpy_comm.exec(cmd, timeout=30)

        blocks_written = len(chunk) // flash_block
        block_num += blocks_written
        offset += chunk_size

        if progress_callback:
            progress_callback(min(offset, data_size), data_size, wire_bytes)

    return wire_bytes, used_compress
def ota_write(self, data, progress_callback=None, compress=None):
    """Write firmware data to next OTA partition

    Writes the firmware image to the partition returned by
    ``esp32.Partition(RUNNING).get_next_update()`` and marks it as the
    boot partition.

    Arguments:
        data: bytes with firmware content (.app-bin)
        progress_callback: optional callback(transferred, total, wire_bytes)
        compress: None=auto-detect, True=force, False=disable

    Returns:
        dict with keys:
            'target': label of target partition
            'offset': flash offset of target partition
            'size': firmware size
            'wire_bytes': bytes sent over wire
            'compressed': whether compression was used

    Raises:
        MpyError: if OTA not available or firmware too large
    """
    try:
        self.import_module('esp32')
    except _mpy_comm.CmdError as err:
        # chain the original error for debuggability (consistent with flash_info)
        raise _mpy_comm.MpyError("OTA not available (ESP32 only)") from err

    try:
        part_info = self._mpy_comm.exec_eval(
            "esp32.Partition(esp32.Partition.RUNNING).get_next_update().info()"
        )
    except _mpy_comm.CmdError as err:
        raise _mpy_comm.MpyError("OTA not available (no OTA partitions)") from err

    # info(): (type, subtype, offset, size, label, encrypted)
    _, _, part_offset, part_size, part_label, _ = part_info
    fw_size = len(data)

    self._mpy_comm.exec("_part = esp32.Partition(esp32.Partition.RUNNING).get_next_update()")

    wire_bytes, used_compress = self._write_partition_data(
        '_part', data, fw_size, part_size, progress_callback, compress
    )

    # mark the freshly written partition as the next boot target
    self._mpy_comm.exec("_part.set_boot()")

    self._mpy_comm.exec("del _part")
    self.import_module('gc')
    self._mpy_comm.exec("gc.collect()")

    return {
        'target': part_label,
        'offset': part_offset,
        'size': fw_size,
        'wire_bytes': wire_bytes,
        'compressed': used_compress,
    }