mpytool 1.2.0-py3-none-any.whl → 2.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
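As context for the changes below: the 2.x `Mpy` class keeps the same constructor but adds progress reporting, optional deflate compression, and batch size/hash checks. A minimal usage sketch, based only on the signatures and docstrings visible in the diff below; the `conn` object (an already-opened mpytool connection, not shown in this file), the file names and the expected size 1234 are placeholders, not part of this diff:

    from mpytool.mpy import Mpy

    def report(transferred, total):
        # progress_callback signature per the new get()/put() docstrings
        print(f"{transferred}/{total} bytes")

    mpy = Mpy(conn, chunk_size=None)   # chunk_size=None = auto-detect from device free RAM
    with open("main.py", "rb") as src:
        encodings, wire_bytes = mpy.put(src.read(), "/main.py",
                                        progress_callback=report, compress=None)
    print("encodings used:", encodings, "bytes on the wire:", wire_bytes)

    # Batch size/hash check; the hash is only computed when the size matches
    print(mpy.fileinfo({"/main.py": 1234}))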
mpytool/mpy.py CHANGED
@@ -1,8 +1,16 @@
 """MicroPython tool: main MPY class"""
 
+import base64
+import zlib
+
 import mpytool.mpy_comm as _mpy_comm
 
 
+def _escape_path(path: str) -> str:
+    """Escape path for use in Python string literal"""
+    return path.replace("\\", "\\\\").replace("'", "\\'")
+
+
 class PathNotFound(_mpy_comm.MpyError):
     """File not found"""
     def __init__(self, file_name):
@@ -26,79 +34,104 @@ class DirNotFound(PathNotFound):
 
 
 class Mpy():
-    _CHUNK = 4096
+    _CHUNK = 512
+    _CHUNK_AUTO_DETECTED = None # Will be set on first put() if not overridden
+    _DEFLATE_AVAILABLE = None # None = not checked, True/False = result
     _ATTR_DIR = 0x4000
     _ATTR_FILE = 0x8000
+    # Helper functions for MicroPython device
+    # Using _mt_ prefix (mpytool) to avoid collisions, short var names to minimize transfer
     _HELPERS = {
         'stat': f"""
-def _mpytool_stat(path):
+def _mt_stat(p):
     try:
-        res = os.stat(path)
-        if res[0] == {_ATTR_DIR}:
-            return -1
-        if res[0] == {_ATTR_FILE}:
-            return res[6]
-    except:
-        return None
-    return None
+        s=os.stat(p)
+        return -1 if s[0]=={_ATTR_DIR} else s[6] if s[0]=={_ATTR_FILE} else None
+    except:return None
 """,
         'tree': f"""
-def _mpytool_tree(path):
-    res_dir = []
-    res_file = []
-    dir_size = 0
-    for entry in os.ilistdir(path):
-        name, attr = entry[:2]
-        if attr == {_ATTR_FILE}:
-            size = entry[3]
-            res_file.append((name, size, None))
-            dir_size += size
-        elif attr == {_ATTR_DIR}:
-            if path in ('', '/'):
-                sub_path = path + name
-            else:
-                sub_path = path + '/' + name
-            _sub_path, sub_dir_size, sub_tree = _mpytool_tree(sub_path)
-            res_dir.append((name, sub_dir_size, sub_tree))
-            dir_size += sub_dir_size
-    return path, dir_size, res_dir + res_file
+def _mt_tree(p):
+    D,F,sz=[],[],0
+    for e in os.ilistdir(p):
+        n,a=e[:2]
+        if a=={_ATTR_FILE}:
+            F.append((n,e[3],None));sz+=e[3]
+        elif a=={_ATTR_DIR}:
+            _,s,t=_mt_tree((p+'/'if p not in('','/')else p)+n)
+            D.append((n,s,t));sz+=s
+    return p,sz,D+F
 """,
         'mkdir': f"""
-def _mpytool_mkdir(path):
-    path = path.rstrip('/')
-    check_path = ''
-    found = True
-    for dir_part in path.split('/'):
-        if check_path:
-            check_path += '/'
-        check_path += dir_part
-        if found:
+def _mt_mkdir(p):
+    p=p.rstrip('/');c='';f=1
+    for d in p.split('/'):
+        c+='/'+d if c else d
+        if f:
             try:
-                result = os.stat(check_path)
-                if result[0] == {_ATTR_FILE}:
-                    return True
+                if os.stat(c)[0]=={_ATTR_FILE}:return 1
                 continue
-            except:
-                found = False
-        os.mkdir(check_path)
-    return False
+            except:f=0
+        os.mkdir(c)
+    return 0
 """,
         'rmdir': f"""
-def _mpytool_rmdir(path):
-    for name, attr, _inode, _size in os.ilistdir(path):
-        if attr == {_ATTR_FILE}:
-            os.remove(path + '/' + name)
-        elif attr == {_ATTR_DIR}:
-            _mpytool_rmdir(path + '/' + name)
-    os.rmdir(path)
+def _mt_rmdir(p):
+    for n,a,_,_ in os.ilistdir(p):
+        q=p+'/'+n
+        if a=={_ATTR_FILE}:os.remove(q)
+        elif a=={_ATTR_DIR}:_mt_rmdir(q)
+    os.rmdir(p)
+""",
+        '_hash': """
+def _mt_hash(p):
+    h=hashlib.sha256()
+    with open(p,'rb')as f:
+        while 1:
+            c=f.read(512)
+            if not c:break
+            h.update(c)
+    return ubinascii.b2a_base64(h.digest()).strip()
+""",
+        'fileinfo': f"""
+def _mt_finfo(files):
+    r={{}}
+    for p,xsz in files.items():
+        try:
+            s=os.stat(p)
+            if s[0]!={_ATTR_FILE}:r[p]=None;continue
+            sz=s[6]
+            r[p]=(sz,None)if sz!=xsz else(sz,_mt_hash(p))
+        except:r[p]=None
+    gc.collect()
+    return r
+""",
+        'partition_magic': """
+def _mt_pmagic(label, size=512):
+    parts = esp32.Partition.find(esp32.Partition.TYPE_DATA, label=label)
+    if not parts:
+        return None
+    p = parts[0]
+    buf = bytearray(size)
+    p.readblocks(0, buf)
+    # Return magic bytes and block size (ioctl 5)
+    return bytes(buf), p.ioctl(5, 0)
+""",
+        'partition_find': """
+def _mt_pfind(label):
+    p = esp32.Partition.find(esp32.Partition.TYPE_APP, label=label)
+    if not p:
+        p = esp32.Partition.find(esp32.Partition.TYPE_DATA, label=label)
+    return p[0] if p else None
 """}
 
-    def __init__(self, conn, log=None):
+    def __init__(self, conn, log=None, chunk_size=None):
         self._conn = conn
         self._log = log
         self._mpy_comm = _mpy_comm.MpyComm(conn, log=log)
         self._imported = []
         self._load_helpers = []
+        self._chunk_size = chunk_size # None = auto-detect
+        self._platform = None # Cached platform name
 
     @property
     def conn(self):
@@ -112,6 +145,24 @@ def _mpytool_rmdir(path):
         """
         return self._mpy_comm
 
+    def reset_state(self):
+        """Reset internal state after device reset
+
+        Call this after soft_reset() or hard_reset() to clear cached state.
+        """
+        self._imported = []
+        self._load_helpers = []
+        self._mpy_comm._repl_mode = None
+        self._platform = None
+        Mpy._CHUNK_AUTO_DETECTED = None
+        Mpy._DEFLATE_AVAILABLE = None
+
+    def _get_platform(self):
+        """Get cached platform name (e.g. 'esp32', 'rp2')"""
+        if self._platform is None:
+            self._platform = self.platform()['platform']
+        return self._platform
+
     def load_helper(self, helper):
         """Load helper function to MicroPython
 
@@ -147,7 +198,7 @@ def _mpytool_rmdir(path):
         """
         self.import_module('os')
         self.load_helper('stat')
-        return self._mpy_comm.exec_eval(f"_mpytool_stat('{path}')")
+        return self._mpy_comm.exec_eval(f"_mt_stat('{_escape_path(path)}')")
 
     def ls(self, path=None):
         """List files on path
@@ -166,7 +217,7 @@ def _mpytool_rmdir(path):
             path = ''
         try:
             result = self._mpy_comm.exec_eval(
-                f"tuple(os.ilistdir('{path}'))")
+                f"tuple(os.ilistdir('{_escape_path(path)}'))")
             res_dir = []
             res_file = []
             for entry in result:
@@ -199,14 +250,13 @@ def _mpytool_rmdir(path):
         if path is None:
             path = ''
         if path in ('', '.', '/'):
-            return self._mpy_comm.exec_eval(f"_mpytool_tree('{path}')")
-        # check if path exists
+            return self._mpy_comm.exec_eval(f"_mt_tree('{_escape_path(path)}')")
         result = self.stat(path)
         if result is None:
             raise DirNotFound(path)
         if result == -1:
-            return self._mpy_comm.exec_eval(f"_mpytool_tree('{path}')")
-        return((path, result[6], None))
+            return self._mpy_comm.exec_eval(f"_mt_tree('{_escape_path(path)}')")
+        return (path, result, None)
 
     def mkdir(self, path):
         """make directory (also create all parents)
@@ -216,7 +266,7 @@ def _mpytool_rmdir(path):
         """
         self.import_module('os')
         self.load_helper('mkdir')
-        if self._mpy_comm.exec_eval(f"_mpytool_mkdir('{path}')"):
+        if self._mpy_comm.exec_eval(f"_mt_mkdir('{_escape_path(path)}')"):
             raise _mpy_comm.MpyError(f'Error creating directory, this is file: {path}')
 
     def delete(self, path):
@@ -231,21 +281,84 @@ def _mpytool_rmdir(path):
         if result == -1:
             self.import_module('os')
             self.load_helper('rmdir')
-            self._mpy_comm.exec(f"_mpytool_rmdir('{path}')")
+            self._mpy_comm.exec(f"_mt_rmdir('{_escape_path(path)}')", 20)
         else:
-            self._mpy_comm.exec(f"os.remove('{path}')")
+            self._mpy_comm.exec(f"os.remove('{_escape_path(path)}')")
 
-    def get(self, path):
+    def rename(self, src, dst):
+        """Rename/move file or directory
+
+        Arguments:
+            src: source path
+            dst: destination path
+        """
+        self.import_module('os')
+        self._mpy_comm.exec(f"os.rename('{_escape_path(src)}', '{_escape_path(dst)}')")
+
+    def hashfile(self, path):
+        """Compute SHA256 hash of file
+
+        Arguments:
+            path: file path
+
+        Returns:
+            bytes with SHA256 hash (32 bytes) or None if hashlib not available
+        """
+        self.import_module('hashlib')
+        self.import_module('ubinascii')
+        self.load_helper('_hash')
+        try:
+            result = self._mpy_comm.exec_eval(f"_mt_hash('{_escape_path(path)}')")
+            return base64.b64decode(result) if result else None
+        except _mpy_comm.CmdError:
+            return None
+
+    def fileinfo(self, files):
+        """Get file info (size and hash) for multiple files in one call
+
+        Arguments:
+            files: dict {path: expected_size} - hash is only computed if sizes match
+
+        Returns:
+            dict {path: (size, hash)} - hash is None if sizes don't match
+            dict {path: None} - if file doesn't exist
+            Returns None if hashlib not available on device
+        """
+        self.import_module('os')
+        self.import_module('gc')
+        self.import_module('hashlib')
+        self.import_module('ubinascii')
+        self.load_helper('_hash')
+        self.load_helper('fileinfo')
+        escaped_files = {_escape_path(p): s for p, s in files.items()}
+        # Timeout scales with number of files (base 5s + 0.5s per file)
+        timeout = 5 + len(files) * 0.5
+        try:
+            result = self._mpy_comm.exec_eval(f"_mt_finfo({escaped_files})", timeout=timeout)
+            for path, info in result.items():
+                if info and info[1]:
+                    result[path] = (info[0], base64.b64decode(info[1]))
+            return result
+        except _mpy_comm.CmdError:
+            return None
+
+    def get(self, path, progress_callback=None):
         """Read file
 
         Arguments:
             path: file path to read
+            progress_callback: optional callback(transferred, total) for progress
 
         Returns:
             bytes with file content
         """
+        total_size = 0
+        if progress_callback:
+            total_size = self.stat(path)
+            if total_size is None or total_size < 0:
+                total_size = 0
         try:
-            self._mpy_comm.exec(f"f = open('{path}', 'rb')")
+            self._mpy_comm.exec(f"f = open('{_escape_path(path)}', 'rb')")
         except _mpy_comm.CmdError as err:
             raise FileNotFound(path) from err
         data = b''
@@ -254,19 +367,933 @@ def _mpytool_rmdir(path):
             if not result:
                 break
             data += result
+            if progress_callback:
+                progress_callback(len(data), total_size)
         self._mpy_comm.exec("f.close()")
         return data
 
-    def put(self, data, path):
-        """Read file
+    def _encode_chunk(self, chunk, compress=False):
+        """Encode chunk for transfer - choose smallest representation
+
+        Arguments:
+            chunk: bytes to encode
+            compress: whether to try compression
+
+        Returns:
+            tuple (command_string, original_chunk_size, encoding_type)
+            encoding_type is 'raw', 'base64', or 'compressed'
+        """
+        chunk_size = len(chunk)
+        raw = repr(chunk)
+        raw_len = len(raw)
+
+        b64 = base64.b64encode(chunk).decode('ascii')
+        b64_cmd = f"ub.a2b_base64('{b64}')"
+        b64_len = len(b64_cmd)
+
+        best_cmd = raw
+        best_len = raw_len
+        best_type = 'raw'
+
+        if b64_len < best_len:
+            best_cmd = b64_cmd
+            best_len = b64_len
+            best_type = 'base64'
+
+        if compress:
+            compressed = zlib.compress(chunk)
+            comp_b64 = base64.b64encode(compressed).decode('ascii')
+            comp_cmd = f"df.DeflateIO(_io.BytesIO(ub.a2b_base64('{comp_b64}'))).read()"
+            comp_len = len(comp_cmd)
+            if comp_len < best_len:
+                best_cmd = comp_cmd
+                best_len = comp_len
+                best_type = 'compressed'
+
+        return best_cmd, chunk_size, best_type
+
+    def _detect_chunk_size(self):
+        """Detect optimal chunk size based on device free RAM
+
+        Returns:
+            chunk size in bytes (512, 1024, 2048, 4096, 8192, 16384, or 32768)
+        """
+        if self._chunk_size is not None:
+            return self._chunk_size
+        if Mpy._CHUNK_AUTO_DETECTED is not None:
+            return Mpy._CHUNK_AUTO_DETECTED
+        self.import_module('gc')
+        self._mpy_comm.exec("gc.collect()")
+        try:
+            free = self._mpy_comm.exec_eval("gc.mem_free()")
+        except _mpy_comm.CmdError:
+            free = 0
+        # Select chunk size based on free RAM (~10-15% of free RAM)
+        if free > 256 * 1024:
+            chunk = 32768
+        elif free > 128 * 1024:
+            chunk = 16384
+        elif free > 64 * 1024:
+            chunk = 8192
+        elif free > 48 * 1024:
+            chunk = 4096
+        elif free > 32 * 1024:
+            chunk = 2048
+        elif free > 24 * 1024:
+            chunk = 1024
+        else:
+            chunk = 512
+        Mpy._CHUNK_AUTO_DETECTED = chunk
+        return chunk
+
+    def _detect_deflate(self):
+        """Detect if deflate module is available and device has enough RAM
+
+        Returns:
+            True if deflate is available and RAM >= 64KB, False otherwise
+        """
+        if Mpy._DEFLATE_AVAILABLE is None:
+            # Check RAM first - need at least 64KB for decompression
+            chunk = self._detect_chunk_size()
+            if chunk < 8192: # chunk < 8K means RAM <= 64KB
+                Mpy._DEFLATE_AVAILABLE = False
+            else:
+                try:
+                    self._mpy_comm.exec("import deflate")
+                    Mpy._DEFLATE_AVAILABLE = True
+                except _mpy_comm.CmdError:
+                    Mpy._DEFLATE_AVAILABLE = False
+        return Mpy._DEFLATE_AVAILABLE
+
+    def put(self, data, path, progress_callback=None, compress=None):
+        """Write file to device
 
         Arguments:
             data: bytes with file content
             path: file path to write
+            progress_callback: optional callback(transferred, total) for progress
+            compress: None=auto-detect, True=force compression, False=disable
+
+        Returns:
+            tuple (encodings_used, wire_bytes) where:
+                encodings_used: set of encoding types ('raw', 'base64', 'compressed')
+                wire_bytes: number of bytes sent over the wire (encoded size)
         """
-        self._mpy_comm.exec(f"f = open('{path}', 'wb')")
+        chunk_size = self._detect_chunk_size()
+        total_size = len(data)
+        transferred = 0
+        wire_bytes = 0
+        encodings_used = set()
+
+        if compress is None:
+            compress = self._detect_deflate()
+
+        self.import_module('ubinascii as ub')
+        if compress:
+            self.import_module('deflate as df')
+            self.import_module('io as _io')
+
+        self._mpy_comm.exec(f"f = open('{_escape_path(path)}', 'wb')")
         while data:
-            chunk = data[:self._CHUNK]
-            count = self._mpy_comm.exec_eval(f"f.write({chunk})", timeout=10)
-            data = data[count:]
+            chunk = data[:chunk_size]
+            cmd, orig_size, enc_type = self._encode_chunk(chunk, compress)
+            encodings_used.add(enc_type)
+            # Wire bytes = command overhead (9 = "f.write(" + ")") + encoded data
+            wire_bytes += 9 + len(cmd)
+            count = self._mpy_comm.exec_eval(f"f.write({cmd})", timeout=10)
+            data = data[orig_size:]
+            transferred += orig_size
+            if progress_callback:
+                progress_callback(transferred, total_size)
         self._mpy_comm.exec("f.close()")
+        # Run garbage collection to free memory and allow flash to settle
+        self.import_module('gc')
+        self._mpy_comm.exec("gc.collect()")
+        return encodings_used, wire_bytes
+
+    def platform(self):
+        """Get platform information
+
+        Returns:
+            dict with keys:
+                'platform': platform name (e.g. 'esp32')
+                'version': MicroPython version string
+                'impl': implementation name (e.g. 'micropython')
+                'machine': machine description (or None)
+        """
+        self.import_module('sys')
+        self.import_module('os')
+
+        platform = self._mpy_comm.exec_eval("repr(sys.platform)")
+        version = self._mpy_comm.exec_eval("repr(sys.version)")
+        impl = self._mpy_comm.exec_eval("repr(sys.implementation.name)")
+
+        try:
+            uname = self._mpy_comm.exec_eval("tuple(os.uname())")
+            machine = uname[4] if len(uname) > 4 else None
+        except _mpy_comm.CmdError:
+            machine = None
+
+        return {
+            'platform': platform,
+            'version': version,
+            'impl': impl,
+            'machine': machine,
+        }
+
+    def memory(self):
+        """Get memory (RAM) information
+
+        Returns:
+            dict with keys:
+                'free': free RAM in bytes
+                'alloc': allocated RAM in bytes
+                'total': total RAM in bytes
+        """
+        self.import_module('gc')
+        self._mpy_comm.exec("gc.collect()")
+
+        mem_free = self._mpy_comm.exec_eval("gc.mem_free()")
+        mem_alloc = self._mpy_comm.exec_eval("gc.mem_alloc()")
+
+        return {
+            'free': mem_free,
+            'alloc': mem_alloc,
+            'total': mem_free + mem_alloc,
+        }
+
+    def unique_id(self):
+        """Get device unique ID (serial number)
+
+        Returns:
+            hex string or None if not available
+        """
+        try:
+            return self._mpy_comm.exec_eval(
+                "repr(__import__('machine').unique_id().hex())"
+            )
+        except _mpy_comm.CmdError:
+            return None
+
+    def mac_addresses(self):
+        """Get network MAC addresses
+
+        Returns:
+            list of (interface_name, mac_address) tuples
+        """
+        addresses = []
+        try:
+            self.import_module('network')
+            try:
+                mac = self._mpy_comm.exec_eval(
+                    "repr(network.WLAN(network.STA_IF).config('mac').hex(':'))"
+                )
+                addresses.append(('WiFi', mac))
+            except _mpy_comm.CmdError:
+                pass
+            try:
+                mac = self._mpy_comm.exec_eval(
+                    "repr(network.WLAN(network.AP_IF).config('mac').hex(':'))"
+                )
+                if not addresses or mac != addresses[0][1]:
+                    addresses.append(('WiFi AP', mac))
+            except _mpy_comm.CmdError:
+                pass
+            try:
+                mac = self._mpy_comm.exec_eval(
+                    "repr(network.LAN().config('mac').hex(':'))"
+                )
+                addresses.append(('LAN', mac))
+            except _mpy_comm.CmdError:
+                pass
+        except _mpy_comm.CmdError:
+            pass
+        return addresses
+
+    def filesystems(self):
+        """Get filesystem information
+
+        Returns:
+            list of dicts with keys: mount, total, free, used
+        """
+        self.import_module('os')
+        result = []
+
+        try:
+            fs_stat = self._mpy_comm.exec_eval("os.statvfs('/')")
+            fs_total = fs_stat[0] * fs_stat[2]
+            fs_free = fs_stat[0] * fs_stat[3]
+            if fs_total > 0:
+                result.append({
+                    'mount': '/',
+                    'total': fs_total,
+                    'free': fs_free,
+                    'used': fs_total - fs_free,
+                })
+        except _mpy_comm.CmdError:
+            pass
+
+        # Check subdirectories for additional mount points
+        try:
+            root_dirs = self._mpy_comm.exec_eval(
+                "[d[0] for d in os.ilistdir('/') if d[1] == 0x4000]"
+            )
+            for dirname in root_dirs:
+                try:
+                    path = '/' + dirname
+                    sub_stat = self._mpy_comm.exec_eval(f"os.statvfs('{path}')")
+                    sub_total = sub_stat[0] * sub_stat[2]
+                    sub_free = sub_stat[0] * sub_stat[3]
+                    # Skip if same as root or zero size
+                    if sub_total == 0 or any(f['total'] == sub_total for f in result):
+                        continue
+                    result.append({
+                        'mount': path,
+                        'total': sub_total,
+                        'free': sub_free,
+                        'used': sub_total - sub_free,
+                    })
+                except _mpy_comm.CmdError:
+                    pass
+        except _mpy_comm.CmdError:
+            pass
+
+        return result
+
+    def info(self):
+        """Get all device information (convenience method)
+
+        Returns:
+            dict combining platform(), memory(), unique_id(),
+            mac_addresses() and filesystems()
+        """
+        result = self.platform()
+        result['unique_id'] = self.unique_id()
+        result['mac_addresses'] = self.mac_addresses()
+        result.update({f'mem_{k}': v for k, v in self.memory().items()})
+        result['filesystems'] = self.filesystems()
+        return result
+
+    _PART_TYPES = {0: 'app', 1: 'data'}
+    _PART_SUBTYPES = {
+        # App subtypes (type 0)
+        0: {0: 'factory', 16: 'ota_0', 17: 'ota_1', 18: 'ota_2', 19: 'ota_3', 32: 'test'},
+        # Data subtypes (type 1)
+        1: {0: 'ota', 1: 'phy', 2: 'nvs', 3: 'coredump', 4: 'nvs_keys',
+            5: 'efuse', 128: 'esphttpd', 129: 'fat', 130: 'spiffs', 131: 'littlefs'},
+    }
+    # Subtypes that can contain a filesystem (for auto-detection)
+    _FS_SUBTYPES = {129, 130, 131} # fat, spiffs, littlefs
+
+    def _detect_fs_from_magic(self, magic):
+        """Detect filesystem type and details from magic bytes.
+
+        Args:
+            magic: First 512 bytes from partition/flash (boot sector)
+
+        Returns:
+            dict with keys:
+                'type': filesystem type ('littlefs2', 'fat16', 'fat32', 'exfat', None)
+                'block_size': block/cluster size in bytes (if detected)
+                'label': volume label (if detected)
+            or None if not enough data
+        """
+        if len(magic) < 16:
+            return None
+
+        result = {'type': None, 'block_size': None, 'label': None}
+
+        # LittleFS v2: "littlefs" string at offset 8
+        # Note: LittleFS uses inline metadata format, block_size is not at fixed offset
+        # We detect the filesystem type but can't reliably get block_size from magic
+        if magic[8:16] == b'littlefs':
+            result['type'] = 'littlefs2'
+            # Block size must be obtained from device (ioctl) or partition info
+            return result
+
+        # Check for FAT boot sector signature (need 512 bytes)
+        if len(magic) >= 512:
+            # Boot sector signature at 510-511
+            if magic[510:512] == b'\x55\xAA':
+                import struct
+                # Bytes per sector (offset 11-12)
+                bytes_per_sector = struct.unpack('<H', magic[11:13])[0]
+                # Sectors per cluster (offset 13)
+                sectors_per_cluster = magic[13]
+                result['block_size'] = bytes_per_sector * sectors_per_cluster
+
+                # Check for exFAT first (has "EXFAT   " at offset 3)
+                if magic[3:11] == b'EXFAT   ':
+                    result['type'] = 'exfat'
+                    return result
+
+                # FAT type string location differs between FAT16 and FAT32
+                # FAT16: "FAT16   " at offset 54
+                # FAT32: "FAT32   " at offset 82
+                if magic[54:62] == b'FAT16   ':
+                    result['type'] = 'fat16'
+                    # Volume label at offset 43 (11 bytes)
+                    label = magic[43:54].rstrip(b' \x00').decode('ascii', errors='ignore')
+                    if label and label != 'NO NAME':
+                        result['label'] = label
+                elif magic[82:90] == b'FAT32   ':
+                    result['type'] = 'fat32'
+                    # Volume label at offset 71 (11 bytes)
+                    label = magic[71:82].rstrip(b' \x00').decode('ascii', errors='ignore')
+                    if label and label != 'NO NAME':
+                        result['label'] = label
+                elif magic[54:59] == b'FAT12':
+                    result['type'] = 'fat12'
+                else:
+                    # Generic FAT (can't determine type)
+                    result['type'] = 'fat'
+                return result
+
+        return result if result['type'] else None
+
+    def _read_partition_magic(self, label, size=512):
+        """Read first bytes from partition for filesystem detection.
+
+        Args:
+            label: Partition label
+            size: Number of bytes to read
+
+        Returns:
+            tuple (magic_bytes, block_size) or None if read fails
+        """
+        try:
+            self.load_helper('partition_magic')
+            return self._mpy_comm.exec_eval(f"_mt_pmagic('{label}', {size})")
+        except _mpy_comm.CmdError:
+            return None
+
+    def partitions(self):
+        """Get ESP32 partition information
+
+        Returns:
+            dict with keys:
+                'partitions': list of partition info dicts with keys:
+                    label, type, type_name, subtype, subtype_name,
+                    offset, size, encrypted, running
+                'running': label of currently running partition
+                'boot': label of boot partition
+                'next_ota': label of next OTA partition (or None)
+                'next_ota_size': size of next OTA partition (or None)
+
+        Raises:
+            MpyError: if not ESP32 or partition module not available
+        """
+        try:
+            self.import_module('esp32')
+        except _mpy_comm.CmdError:
+            raise _mpy_comm.MpyError("Partition info not available (ESP32 only)")
+
+        running = self._mpy_comm.exec_eval(
+            "repr(esp32.Partition(esp32.Partition.RUNNING).info()[4])"
+        )
+
+        raw_parts = self._mpy_comm.exec_eval(
+            "[p.info() for p in "
+            "esp32.Partition.find(esp32.Partition.TYPE_APP) + "
+            "esp32.Partition.find(esp32.Partition.TYPE_DATA)]"
+        )
+
+        partitions = []
+        next_ota_size = None
+        for ptype, subtype, offset, size, label, encrypted in raw_parts:
+            type_name = self._PART_TYPES.get(ptype, str(ptype))
+            subtype_name = self._PART_SUBTYPES.get(ptype, {}).get(subtype, str(subtype))
+            part_info = {
+                'label': label,
+                'type': ptype,
+                'type_name': type_name,
+                'subtype': subtype,
+                'subtype_name': subtype_name,
+                'offset': offset,
+                'size': size,
+                'encrypted': encrypted,
+                'running': label == running,
+                'filesystem': None,
+                'fs_block_size': None,
+            }
+            # Detect actual filesystem for data partitions with FS subtypes
+            if ptype == 1 and subtype in self._FS_SUBTYPES: # TYPE_DATA
+                result = self._read_partition_magic(label)
+                if result:
+                    magic, block_size = result
+                    part_info['fs_block_size'] = block_size
+                    fs_info = self._detect_fs_from_magic(magic)
+                    if fs_info:
+                        part_info['filesystem'] = fs_info.get('type')
+                        # For FAT, use cluster size from magic; for others use partition block size
+                        if fs_info.get('block_size') and 'fat' in (fs_info.get('type') or ''):
+                            part_info['fs_cluster_size'] = fs_info.get('block_size')
+            partitions.append(part_info)
+
+        try:
+            boot = self._mpy_comm.exec_eval(
+                "repr(esp32.Partition(esp32.Partition.BOOT).info()[4])"
+            )
+        except _mpy_comm.CmdError:
+            boot = None
+
+        # Get next OTA partition (get size and label separately to handle string eval)
+        try:
+            next_ota_size = self._mpy_comm.exec_eval(
+                "esp32.Partition(esp32.Partition.RUNNING).get_next_update().info()[3]"
+            )
+            next_ota = self._mpy_comm.exec_eval(
+                "repr(esp32.Partition(esp32.Partition.RUNNING).get_next_update().info()[4])"
+            )
+        except _mpy_comm.CmdError:
+            next_ota = None
+            next_ota_size = None
+
+        return {
+            'partitions': partitions,
+            'running': running,
+            'boot': boot,
+            'next_ota': next_ota,
+            'next_ota_size': next_ota_size,
+        }
+
+    def flash_info(self):
+        """Get RP2 flash information
+
+        Returns:
+            dict with keys:
+                'size': total flash size in bytes
+                'block_size': block size in bytes
+                'block_count': number of blocks
+                'filesystem': detected filesystem type ('littlefs2', 'fat', 'unknown')
+
+        Raises:
+            MpyError: if not RP2 or rp2.Flash not available
+        """
+        try:
+            self.import_module('rp2')
+        except _mpy_comm.CmdError as err:
+            raise _mpy_comm.MpyError("Flash info not available (RP2 only)") from err
+
+        # Get flash info via ioctl
+        # ioctl(4) = block count, ioctl(5) = block size
+        self._mpy_comm.exec("_f = rp2.Flash()")
+        info = self._mpy_comm.exec_eval("(_f.ioctl(4, 0), _f.ioctl(5, 0))")
+        block_count, block_size = info
+        size = block_count * block_size
+
+        # Read first 512 bytes for filesystem detection
+        self._mpy_comm.exec("_b = bytearray(512); _f.readblocks(0, _b)")
+        magic = self._mpy_comm.exec_eval("bytes(_b)")
+
+        # Use common filesystem detection
+        fs_info = self._detect_fs_from_magic(magic)
+        fs_type = fs_info.get('type') if fs_info else None
+        fs_block_size = fs_info.get('block_size') if fs_info else None
+
+        return {
+            'size': size,
+            'block_size': block_size,
+            'block_count': block_count,
+            'filesystem': fs_type or 'unknown',
+            'fs_block_size': fs_block_size,
+            'magic': magic[:16],
+        }
+
+    def flash_read(self, label=None, progress_callback=None):
+        """Read flash/partition content
+
+        Arguments:
+            label: partition label (ESP32) or None (RP2 entire user flash)
+            progress_callback: optional callback(transferred, total)
+
+        Returns:
+            bytes with flash/partition content
+
+        Raises:
+            MpyError: if wrong platform or partition not found
+        """
+        platform = self._get_platform()
+        self.import_module('ubinascii as ub')
+
+        if label:
+            # ESP32 partition
+            if platform != 'esp32':
+                raise _mpy_comm.MpyError("Partition label requires ESP32")
+            self.import_module('esp32')
+            self.load_helper('partition_find')
+            try:
+                part_info = self._mpy_comm.exec_eval(f"_mt_pfind('{label}').info()")
+            except _mpy_comm.CmdError:
+                raise _mpy_comm.MpyError(f"Partition '{label}' not found")
+            _, _, _, total_size, _, _ = part_info
+            block_size = 4096
+            self._mpy_comm.exec(f"_dev = _mt_pfind('{label}')")
+        else:
+            # RP2 flash
+            if platform != 'rp2':
+                raise _mpy_comm.MpyError("Flash read without label requires RP2")
+            self.import_module('rp2')
+            self._mpy_comm.exec("_dev = rp2.Flash()")
+            info = self._mpy_comm.exec_eval("(_dev.ioctl(4, 0), _dev.ioctl(5, 0))")
+            block_count, block_size = info
+            total_size = block_count * block_size
+
+        total_blocks = (total_size + block_size - 1) // block_size
+        chunk_blocks = 8 # 32KB per iteration
+        data = bytearray()
+        block_num = 0
+
+        while block_num < total_blocks:
+            blocks_to_read = min(chunk_blocks, total_blocks - block_num)
+            bytes_to_read = blocks_to_read * block_size
+            self._mpy_comm.exec(
+                f"_buf=bytearray({bytes_to_read}); _dev.readblocks({block_num}, _buf)")
+            b64_data = self._mpy_comm.exec_eval("repr(ub.b2a_base64(_buf).decode())")
+            chunk = base64.b64decode(b64_data)
+            data.extend(chunk)
+            block_num += blocks_to_read
+            if progress_callback:
+                progress_callback(min(block_num * block_size, total_size), total_size)
+
+        self._mpy_comm.exec("del _dev")
+        return bytes(data[:total_size])
+
+    def flash_write(self, data, label=None, progress_callback=None, compress=None):
+        """Write data to flash/partition
+
+        WARNING: This will overwrite the filesystem! Use with caution.
+
+        Arguments:
+            data: bytes to write (will be padded to block size)
+            label: partition label (ESP32) or None (RP2 entire user flash)
+            progress_callback: optional callback(transferred, total) for RP2,
+                callback(transferred, total, wire_bytes) for ESP32
+            compress: None=auto-detect, True=force, False=disable (ESP32 only)
+
+        Returns:
+            dict with keys: 'size', 'written', and for ESP32: 'wire_bytes', 'compressed'
+
+        Raises:
+            MpyError: if wrong platform, data too large, or partition not found
+        """
+        platform = self._get_platform()
+
+        if label:
+            # ESP32 partition - use _write_partition_data for compression support
+            if platform != 'esp32':
+                raise _mpy_comm.MpyError("Partition label requires ESP32")
+            self.import_module('esp32')
+            self.load_helper('partition_find')
+            try:
+                part_info = self._mpy_comm.exec_eval(f"_mt_pfind('{label}').info()")
+            except _mpy_comm.CmdError:
+                raise _mpy_comm.MpyError(f"Partition '{label}' not found")
+            _, _, _, part_size, part_label, _ = part_info
+            self._mpy_comm.exec(f"_dev = _mt_pfind('{label}')")
+
+            wire_bytes, used_compress = self._write_partition_data(
+                '_dev', data, len(data), part_size, progress_callback, compress)
+
+            self._mpy_comm.exec("del _dev")
+            self.import_module('gc')
+            self._mpy_comm.exec("gc.collect()")
+
+            return {
+                'size': part_size,
+                'written': len(data),
+                'wire_bytes': wire_bytes,
+                'compressed': used_compress,
+            }
+        else:
+            # RP2 flash - simple block write
+            if platform != 'rp2':
+                raise _mpy_comm.MpyError("Flash write without label requires RP2")
+            self.import_module('rp2')
+            self._mpy_comm.exec("_dev = rp2.Flash()")
+            info = self._mpy_comm.exec_eval("(_dev.ioctl(4, 0), _dev.ioctl(5, 0))")
+            block_count, block_size = info
+            total_size = block_count * block_size
+
+            if len(data) > total_size:
+                raise _mpy_comm.MpyError(
+                    f"Data too large: {len(data)} bytes, flash size: {total_size} bytes")
+
+            self.import_module('ubinascii as ub')
+
+            # Pad data to block size
+            if len(data) % block_size:
+                padding = block_size - (len(data) % block_size)
+                data = data + b'\xff' * padding
+
+            chunk_blocks = 8 # 32KB per iteration
+            block_num = 0
+            total_blocks = len(data) // block_size
+
+            while block_num < total_blocks:
+                blocks_to_write = min(chunk_blocks, total_blocks - block_num)
+                offset = block_num * block_size
+                chunk = data[offset:offset + blocks_to_write * block_size]
+                b64_chunk = base64.b64encode(chunk).decode('ascii')
+                self._mpy_comm.exec(f"_buf=ub.a2b_base64('{b64_chunk}')")
+                self._mpy_comm.exec(f"_dev.writeblocks({block_num}, _buf)")
+                block_num += blocks_to_write
+                if progress_callback:
+                    progress_callback(block_num * block_size, len(data))
+
+            self._mpy_comm.exec("del _dev")
+            return {
+                'size': total_size,
+                'written': len(data),
+            }
+
+    def flash_erase(self, label=None, full=False, progress_callback=None):
+        """Erase flash/partition
+
+        Arguments:
+            label: partition label (ESP32) or None (RP2 entire user flash)
+            full: if True, erase entire flash/partition; if False, erase first 2 blocks
+            progress_callback: optional callback(transferred, total)
+
+        Returns:
+            dict with keys: 'erased', and 'label' for ESP32
+
+        Raises:
+            MpyError: if wrong platform or partition not found
+        """
+        platform = self._get_platform()
+
+        if label:
+            # ESP32 partition
+            if platform != 'esp32':
+                raise _mpy_comm.MpyError("Partition label requires ESP32")
+            self.import_module('esp32')
+            self.load_helper('partition_find')
+            try:
+                part_info = self._mpy_comm.exec_eval(f"_mt_pfind('{label}').info()")
+            except _mpy_comm.CmdError:
+                raise _mpy_comm.MpyError(f"Partition '{label}' not found")
+            _, _, _, part_size, _, _ = part_info
+            block_size = 4096
+            total_blocks = part_size // block_size
+            self._mpy_comm.exec(f"_dev = _mt_pfind('{label}')")
+        else:
+            # RP2 flash
+            if platform != 'rp2':
+                raise _mpy_comm.MpyError("Flash erase without label requires RP2")
+            self.import_module('rp2')
+            self._mpy_comm.exec("_dev = rp2.Flash()")
+            info = self._mpy_comm.exec_eval("(_dev.ioctl(4, 0), _dev.ioctl(5, 0))")
+            total_blocks, block_size = info
+
+        if full:
+            blocks_to_erase = total_blocks
+        else:
+            blocks_to_erase = min(2, total_blocks) # First 2 blocks for FS reset
+
+        total_bytes = blocks_to_erase * block_size
+
+        # Prepare empty block buffer on device
+        self._mpy_comm.exec(f"_buf = b'\\xff' * {block_size}")
+
+        for block_num in range(blocks_to_erase):
+            self._mpy_comm.exec(f"_dev.writeblocks({block_num}, _buf)")
+            if progress_callback:
+                progress_callback((block_num + 1) * block_size, total_bytes)
+
+        self._mpy_comm.exec("del _dev")
+
+        result = {'erased': total_bytes}
+        if label:
+            result['label'] = label
+        return result
+
+    def soft_reset(self):
+        """Soft reset device (Ctrl-D in REPL)
+
+        Runs boot.py and main.py after reset.
+        """
+        self._mpy_comm.soft_reset()
+        self.reset_state()
+
+    def soft_reset_raw(self):
+        """Soft reset in raw REPL mode
+
+        Clears RAM but doesn't run boot.py/main.py.
+        """
+        self._mpy_comm.soft_reset_raw()
+        self.reset_state()
+
+    def machine_reset(self, reconnect=True):
+        """MCU reset using machine.reset()
+
+        Arguments:
+            reconnect: if True, attempt to reconnect after reset
+
+        Returns:
+            True if reconnected successfully, False otherwise
+
+        Note: For USB-CDC ports, the port may disappear and reappear.
+        """
+        self._mpy_comm.enter_raw_repl()
+        self._conn.write(b"import machine; machine.reset()\x04")
+        self.reset_state()
+        if reconnect:
+            self._conn.reconnect()
+            return True
+        return False
+
+    def machine_bootloader(self):
+        """Enter bootloader using machine.bootloader()
+
+        Note: Connection will be lost after this call.
+        """
+        self._mpy_comm.enter_raw_repl()
+        self._conn.write(b"import machine; machine.bootloader()\x04")
+        self.reset_state()
+
+    def hard_reset(self):
+        """Hardware reset using RTS signal (serial only)
+
+        Raises:
+            NotImplementedError: if connection doesn't support hardware reset
+        """
+        self._conn.hard_reset()
+        self.reset_state()
+
+    def reset_to_bootloader(self):
+        """Enter bootloader using DTR/RTS signals (ESP32 serial only)
+
+        Raises:
+            NotImplementedError: if connection doesn't support this
+        """
+        self._conn.reset_to_bootloader()
+        self.reset_state()
+
+    def _write_partition_data(
+            self, part_var, data, data_size, part_size,
+            progress_callback=None, compress=None):
+        """Write data to partition (shared implementation)
+
+        Arguments:
+            part_var: variable name holding partition on device (e.g. '_part')
+            data: bytes to write
+            data_size: size of data
+            part_size: partition size (for validation)
+            progress_callback: optional callback(transferred, total, wire_bytes)
+            compress: None=auto-detect, True=force, False=disable
+
+        Returns:
+            tuple: (wire_bytes, used_compress)
+        """
+        if data_size > part_size:
+            raise _mpy_comm.MpyError(
+                f"Data too large: {data_size} > {part_size} bytes"
+            )
+
+        if compress is None:
+            compress = self._detect_deflate()
+
+        flash_block = 4096
+        chunk_size = self._detect_chunk_size()
+        chunk_size = max(flash_block, (chunk_size // flash_block) * flash_block)
+
+        self.import_module('ubinascii as ub')
+        if compress:
+            self.import_module('deflate as df')
+            self.import_module('io as _io')
+
+        block_num = 0
+        offset = 0
+        wire_bytes = 0
+        used_compress = False
+
+        while offset < data_size:
+            chunk = data[offset:offset + chunk_size]
+            chunk_len = len(chunk)
+
+            # Pad last chunk to flash block size
+            if chunk_len % flash_block:
+                padding = flash_block - (chunk_len % flash_block)
+                chunk = chunk + b'\xff' * padding
+
+            if compress:
+                compressed = zlib.compress(chunk)
+                comp_b64 = base64.b64encode(compressed).decode('ascii')
+                raw_b64 = base64.b64encode(chunk).decode('ascii')
+                if len(comp_b64) < len(raw_b64) - 20:
+                    cmd = f"{part_var}.writeblocks({block_num}, df.DeflateIO(_io.BytesIO(ub.a2b_base64('{comp_b64}'))).read())"
+                    wire_bytes += len(comp_b64)
+                    used_compress = True
+                else:
+                    cmd = f"{part_var}.writeblocks({block_num}, ub.a2b_base64('{raw_b64}'))"
+                    wire_bytes += len(raw_b64)
+            else:
+                raw_b64 = base64.b64encode(chunk).decode('ascii')
+                cmd = f"{part_var}.writeblocks({block_num}, ub.a2b_base64('{raw_b64}'))"
+                wire_bytes += len(raw_b64)
+
+            self._mpy_comm.exec(cmd, timeout=30)
+
+            blocks_written = len(chunk) // flash_block
+            block_num += blocks_written
+            offset += chunk_size
+
+            if progress_callback:
+                progress_callback(min(offset, data_size), data_size, wire_bytes)
+
+        return wire_bytes, used_compress
+
+    def ota_write(self, data, progress_callback=None, compress=None):
+        """Write firmware data to next OTA partition
+
+        Arguments:
+            data: bytes with firmware content (.app-bin)
+            progress_callback: optional callback(transferred, total, wire_bytes)
+            compress: None=auto-detect, True=force, False=disable
+
+        Returns:
+            dict with keys:
+                'target': label of target partition
+                'size': firmware size
+                'wire_bytes': bytes sent over wire
+                'compressed': whether compression was used
+
+        Raises:
+            MpyError: if OTA not available or firmware too large
+        """
+        try:
+            self.import_module('esp32')
+        except _mpy_comm.CmdError:
+            raise _mpy_comm.MpyError("OTA not available (ESP32 only)")
+
+        try:
+            part_info = self._mpy_comm.exec_eval(
+                "esp32.Partition(esp32.Partition.RUNNING).get_next_update().info()"
+            )
+        except _mpy_comm.CmdError:
+            raise _mpy_comm.MpyError("OTA not available (no OTA partitions)")
+
+        part_type, part_subtype, part_offset, part_size, part_label, _ = part_info
+        fw_size = len(data)
+
+        self._mpy_comm.exec("_part = esp32.Partition(esp32.Partition.RUNNING).get_next_update()")
+
+        wire_bytes, used_compress = self._write_partition_data(
+            '_part', data, fw_size, part_size, progress_callback, compress
+        )
+
+        self._mpy_comm.exec("_part.set_boot()")
+
+        self._mpy_comm.exec("del _part")
+        self.import_module('gc')
+        self._mpy_comm.exec("gc.collect()")
+
+        return {
+            'target': part_label,
+            'offset': part_offset,
+            'size': fw_size,
+            'wire_bytes': wire_bytes,
+            'compressed': used_compress,
+        }
+
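A second sketch, again derived only from the docstrings in the diff above, for the new device-info and ESP32 flash/OTA calls; `mpy` is the Mpy instance from the sketch near the top of this page and the firmware file name is a placeholder:

    print(mpy.info())           # platform(), memory(), unique_id(), MACs and filesystems in one dict

    parts = mpy.partitions()    # ESP32 only; raises MpyError on other ports
    print(parts['running'], parts['next_ota'], parts['next_ota_size'])

    with open("firmware.app-bin", "rb") as fw:
        result = mpy.ota_write(fw.read(), compress=None)  # None = auto-detect deflate support
    print("wrote", result['size'], "bytes to", result['target'],
          "compressed:", result['compressed'], "wire bytes:", result['wire_bytes'])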