dwipe-1.0.5-py3-none-any.whl → dwipe-2.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dwipe/WipeJob.py ADDED
@@ -0,0 +1,1243 @@
1
+ """
2
+ WipeJob class for handling disk/partition wiping operations
3
+
4
+ """
5
+ # pylint: disable=broad-exception-raised,broad-exception-caught
6
+ import os
7
+ import json
8
+ import time
9
+ import threading
10
+ import random
11
+ import traceback
12
+ import subprocess
13
+ import mmap
14
+ from types import SimpleNamespace
15
+
16
+ from .Utils import Utils
17
+
18
+
19
+ class WipeJob:
20
+ """Handles disk/partition wiping operations with progress tracking"""
21
+
22
+ # O_DIRECT requires aligned buffers and write sizes
23
+ BLOCK_SIZE = 4096 # Alignment requirement for O_DIRECT
24
+ WRITE_SIZE = 1 * 1024 * 1024 # 1MB (must be multiple of BLOCK_SIZE)
25
+ BUFFER_SIZE = WRITE_SIZE # Same size for O_DIRECT
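For illustration (not part of the package): O_DIRECT rejects transfers whose buffer address, file offset, or length is not block-aligned, which is why chunk sizes later in this file are rounded down to BLOCK_SIZE. A minimal sketch of that rounding rule:

```python
# Hypothetical helper: largest O_DIRECT-safe length not exceeding a request.
BLOCK_SIZE = 4096

def aligned_len(requested: int) -> int:
    """Round down to a multiple of BLOCK_SIZE (0 means too small to write)."""
    return (requested // BLOCK_SIZE) * BLOCK_SIZE

assert aligned_len(1_000_000) == 999_424   # 244 * 4096
assert aligned_len(4_096) == 4_096
assert aligned_len(4_095) == 0
```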
26
+
27
+ # Marker constants (separate from O_DIRECT writes)
28
+ MARKER_SIZE = 16 * 1024 # 16KB for marker
29
+ STATE_OFFSET = 15 * 1024 # where json is written (for marker buffer)
30
+
31
+ # Aligned buffers allocated with mmap (initialized at module load)
32
+ buffer = None # Random data buffer (memoryview)
33
+ buffer_mem = None # Underlying mmap object
34
+ zero_buffer = None # Zero buffer (memoryview)
35
+ zero_buffer_mem = None # Underlying mmap object
36
+
37
+ @staticmethod
38
+ def _get_dirty_kb():
39
+ """Read Dirty pages from /proc/meminfo (in KB)"""
40
+ try:
41
+ with open('/proc/meminfo', 'r', encoding='utf-8') as f:
42
+ for line in f:
43
+ if line.startswith('Dirty:'):
44
+ return int(line.split()[1])
45
+ except Exception:
46
+ pass
47
+ return 0
48
+
49
+ @staticmethod
50
+ def _get_total_memory_mb():
51
+ """Read total system memory from /proc/meminfo (in MB)"""
52
+ try:
53
+ with open('/proc/meminfo', 'r', encoding='utf-8') as f:
54
+ for line in f:
55
+ if line.startswith('MemTotal:'):
56
+ # Value is in KB, convert to MB
57
+ return int(line.split()[1]) // 1024
58
+ except Exception:
59
+ pass
60
+ return 8192 # Default to 8GB if we can't read
61
+
62
+ @staticmethod
63
+ def _rebalance_buffer(buffer):
64
+ """Rebalance byte distribution to be more uniform
65
+
66
+ Takes random data and redistributes bytes to make the distribution
67
+ more uniform, which helps chi-squared style verification pass. Note the
69
+ result is only statistically random-looking, no longer strictly cryptographically random.
69
+ """
70
+ # Count byte frequencies
71
+ byte_counts = [0] * 256
72
+ for byte in buffer:
73
+ byte_counts[byte] += 1
74
+
75
+ # Calculate expected frequency (uniform distribution)
76
+ expected = len(buffer) / 256
77
+
78
+ # Find overused and underused bytes
79
+ overused = [] # (byte_value, excess_count)
80
+ underused = [] # (byte_value, deficit_count)
81
+
82
+ for byte_val in range(256):
83
+ count = byte_counts[byte_val]
84
+ diff = count - expected
85
+ if diff > expected * 0.05: # More than 5% over expected
86
+ overused.append((byte_val, int(diff * 0.7))) # Replace 70% of excess
87
+ elif diff < -expected * 0.05: # More than 5% under expected
88
+ underused.append((byte_val, int(-diff * 0.7))) # Add to 70% of deficit
89
+
90
+ if not overused or not underused:
91
+ return buffer # Already well-balanced
92
+
93
+ # Create a mutable copy
94
+ result = bytearray(buffer)
95
+
96
+ # Build replacement plan
97
+ replacements_made = 0
98
+ target_replacements = min(sum(count for _, count in overused),
99
+ sum(count for _, count in underused))
100
+
101
+ # Randomly replace overused bytes with underused bytes
102
+ underused_idx = 0
103
+ for overused_byte, excess in overused:
104
+ if replacements_made >= target_replacements:
105
+ break
106
+
107
+ # Find positions of this overused byte
108
+ positions = [i for i, b in enumerate(result) if b == overused_byte]
109
+
110
+ # Replace randomly selected positions
111
+ replace_count = min(excess, target_replacements - replacements_made)
112
+ positions_to_replace = random.sample(positions, min(replace_count, len(positions)))
113
+
114
+ for pos in positions_to_replace:
115
+ if underused_idx >= len(underused):
116
+ break
117
+ underused_byte, deficit = underused[underused_idx]
118
+ result[pos] = underused_byte
119
+ replacements_made += 1
120
+
121
+ # Move to next underused byte when we've used up this one's deficit
122
+ if replacements_made % max(1, deficit) == 0:
123
+ underused_idx += 1
124
+
125
+ return bytes(result)
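A quick, hypothetical way to see the effect, assuming the class is importable from `dwipe.WipeJob` (the skewed input and the `spread` helper are made up for the demo):

```python
import os
from collections import Counter
from dwipe.WipeJob import WipeJob

# Deliberately skew 1 MB of random data so one byte value is heavily overused.
raw = bytearray(os.urandom(WipeJob.BUFFER_SIZE))
raw[:100_000] = b'\xaa' * 100_000
balanced = WipeJob._rebalance_buffer(bytes(raw))

def spread(data):
    """Gap between the most and least frequent byte values."""
    counts = Counter(data)
    return max(counts.values()) - min(counts.values())

print(spread(raw), '->', spread(balanced))   # the gap should narrow
```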
126
+
127
+ def __init__(self, device_path, total_size, opts=None, resume_from=0, resume_mode=None):
128
+ self.opts = opts if opts else SimpleNamespace(dry_run=False)
129
+ self.device_path = device_path
130
+ self.total_size = total_size
131
+ self.do_abort = False
132
+ self.thread = None
133
+
134
+ self.start_mono = time.monotonic() # Track the start time
135
+ self.total_written = resume_from # Start from resumed offset if resuming
136
+ self.resume_from = resume_from # Track resume offset
137
+ self.resume_mode = resume_mode # Original mode if resuming (overrides opts.wipe_mode)
138
+ self.wr_hists = [] # list of (mono, written)
139
+ self.done = False
140
+ self.exception = None # in case of issues
141
+
142
+ # Multi-pass tracking
143
+ self.passes = getattr(opts, 'passes', 1) # Total number of passes to perform
144
+ self.current_pass = 0 # Current pass number (0-indexed)
145
+
146
+ # Verification tracking
147
+ self.verify_phase = False # True when verifying
148
+ self.verify_start_mono = None # Start time of verify phase
149
+ self.verify_progress = 0 # Bytes verified so far
150
+ self.verify_pct = 0 # Percentage of disk being verified (e.g., 2 for 2%)
151
+ self.verify_result = None # "zeroed", "random", "not-wiped", "mixed"
152
+
153
+ self.expected_pattern = None # "zero" or "random" for what we wrote
154
+ self.is_verify_only = False # True if this is a standalone verify job
155
+
156
+ # Periodic marker updates for crash recovery
157
+ self.last_marker_update_mono = time.monotonic() - 25 # Backdated so the first marker write lands ~5s in
158
+ self.marker_update_interval = 30 # Update marker every 30 seconds
159
+
160
+ ## SLOWDOWN / STALL DETECTION/ABORT FEATURE
161
+ ##
162
+ self.slowdown_stop = getattr(opts, 'slowdown_stop', 16)
163
+ self.stall_timeout = getattr(opts, 'stall_timeout', 60)
164
+ self.max_slowdown_ratio = 0
165
+ self.max_stall_secs = 0
166
+ # Initialize tracking variables
167
+ self.baseline_speed = None # Bytes per second
168
+ self.baseline_end_mono = None # When baseline measurement ended
169
+ # Stall tracking
170
+ self.last_progress_mono = time.monotonic() # Last time we made progress
171
+ self.last_progress_written = resume_from # Bytes written at last progress check
172
+ # For periodic slowdown checks (every 10 seconds)
173
+ self.last_slowdown_check = 0
174
+ # Initialize write history for speed calculation
175
+ self.wr_hists.append(SimpleNamespace(mono=self.start_mono, written=resume_from))
176
+
177
+ # ERROR ABORT FEATURE
178
+ self.max_consecutive_errors = 3 # a control
179
+ self.max_total_errors = 100 # a control
180
+ self.reopen_on_error = True # a control
181
+ self.reopen_count = 0 # reopens since last successful write (info only)
182
+ self.total_errors = 0 # cumulative
183
+
186
+ @staticmethod
187
+ def start_job(device_path, total_size, opts):
188
+ """Start a wipe job in a background thread
189
+
190
+ If an existing marker with stopped state is found, resumes from that offset.
191
+ Uses current passes setting to determine target bytes (may differ from original).
192
+
193
+ Smart resume logic for multi-pass:
194
+ - If resuming in the final pass, checks pattern on disk
195
+ - If pattern matches expected for final pass, resumes from current position
196
+ - If pattern doesn't match, restarts final pass from beginning
197
+ This prevents mixed patterns (e.g., random then zeros) in the final result.
198
+ """
199
+ resume_from = 0
200
+ resume_mode = None
201
+
202
+ # Check for existing marker to resume from
203
+ device_name = device_path.replace('/dev/', '')
204
+ existing_marker = WipeJob.read_marker_buffer(device_name)
205
+ if existing_marker and existing_marker.scrubbed_bytes > 0:
206
+ scrubbed = existing_marker.scrubbed_bytes
207
+ resume_mode = existing_marker.mode # Use original mode ('Rand' or 'Zero')
208
+ passes = getattr(opts, 'passes', 1)
209
+
210
+ # Check if the marker indicates a completed wipe
211
+ # Old markers without 'passes' field: assume 1 pass
212
+ marker_passes = getattr(existing_marker, 'passes', 1)
213
+ marker_target = total_size * marker_passes
214
+
215
+ if scrubbed >= marker_target:
216
+ # Wipe was completed with marker_passes - user wants to re-wipe
217
+ # Start fresh from 0
218
+ resume_from = 0
219
+ resume_mode = None
220
+ else:
221
+ # Partial/stopped wipe - resume from where it left off
222
+ resume_from = scrubbed
223
+ # Ensure we don't resume in the marker area
224
+ if resume_from < WipeJob.MARKER_SIZE:
225
+ resume_from = WipeJob.MARKER_SIZE
226
+ # Also ensure not past the end (sanity check)
227
+ if resume_from > total_size * getattr(opts, 'passes', 1):
228
+ resume_from = 0 # Start over if marker corrupted
229
+
230
+ # Partial/stopped wipe - resume from where it left off
231
+ # Smart resume: check if we're in the final pass
232
+ current_pass = scrubbed // total_size
233
+ last_pass_num = passes - 1
234
+
235
+ if current_pass >= last_pass_num:
236
+ # Resuming in final pass - check pattern on disk matches expected
237
+ # Create temporary job to use get_pass_pattern method
238
+ temp_job = WipeJob(device_path, total_size, opts)
239
+ temp_job.passes = passes
240
+ expected_is_random = temp_job.get_pass_pattern(last_pass_num, resume_mode)
241
+
242
+ # Detect actual pattern on disk
243
+ actual_is_random = WipeJob.detect_pattern_on_disk(device_path)
244
+
245
+ if expected_is_random != actual_is_random:
246
+ # Pattern mismatch - restart final pass from beginning
247
+ resume_from = last_pass_num * total_size
248
+ else:
249
+ # Pattern matches - resume from current position
250
+ resume_from = scrubbed
251
+
252
+ job = WipeJob(device_path=device_path, total_size=total_size, opts=opts,
253
+ resume_from=resume_from, resume_mode=resume_mode)
254
+ job.thread = threading.Thread(target=job.write_partition)
255
+ job.thread.start()
256
+ return job
257
+
258
+ @staticmethod
259
+ def start_verify_job(device_path, total_size, opts, expected_pattern=None):
260
+ """Start a verification-only job in a background thread
261
+
262
+ Args:
263
+ device_path: Path to device (e.g., '/dev/sda1')
264
+ total_size: Total size in bytes
265
+ opts: Options namespace with verify_pct
266
+ expected_pattern: "zeroed", "random", or None (auto-detect)
267
+ """
268
+ job = WipeJob(device_path=device_path, total_size=total_size, opts=opts)
269
+ job.is_verify_only = True # Mark as standalone verification job
270
+ job.expected_pattern = expected_pattern
271
+ verify_pct = getattr(opts, 'verify_pct', 0)
272
+ if verify_pct == 0:
273
+ verify_pct = 2 # Default to 2% if not set
274
+
275
+ # Initialize verify state BEFORE starting thread to avoid showing "0%"
276
+ job.verify_pct = verify_pct
277
+ job.verify_start_mono = time.monotonic()
278
+ job.verify_progress = 0
279
+ job.wr_hists = [SimpleNamespace(mono=job.verify_start_mono, written=0)]
280
+ job.verify_phase = True # Set before thread starts
281
+
282
+ def verify_runner():
283
+ try:
284
+ # Read existing marker to determine the mode and expected pattern
285
+ device_name = os.path.basename(device_path)
286
+ existing_marker = WipeJob.read_marker_buffer(device_name)
287
+ if existing_marker:
288
+ # Infer expected pattern from marker if not already set
289
+ if job.expected_pattern is None:
290
+ job.expected_pattern = "random" if existing_marker.mode == 'Rand' else "zeroed"
291
+
292
+ job.verify_partition(verify_pct)
293
+
294
+ # Write marker with verification status
295
+ if existing_marker:
296
+ is_random = existing_marker.mode == 'Rand'
297
+ job._write_marker_with_verify_status(is_random)
298
+ # Note: _write_marker_with_verify_status sets job.done in its finally block
299
+ else:
300
+ # No marker - just mark as done
301
+ job.done = True
302
+ except Exception:
303
+ job.exception = traceback.format_exc()
304
+ finally:
305
+ # ALWAYS ensure job is marked as done, even if exception or early return
306
+ if not job.done:
307
+ job.done = True
308
+
309
+ job.thread = threading.Thread(target=verify_runner)
310
+ job.thread.start()
311
+ return job
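Hypothetical usage; `/dev/sdX` and the size are placeholders, and `dry_run=True` keeps it off real hardware:

```python
import time
from types import SimpleNamespace
from dwipe.WipeJob import WipeJob

opts = SimpleNamespace(dry_run=True, verify_pct=2, passes=1)
job = WipeJob.start_verify_job('/dev/sdX', 64 * 1024**2, opts)
job.thread.join()
print(job.verify_result)   # dry runs read simulated zeros -> "zeroed"
```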
312
+
313
+ def _check_for_stall(self, current_monotonic):
314
+ """Check for stall (no progress) - called frequently"""
315
+ if self.stall_timeout <= 0:
316
+ return False
317
+
318
+ time_since_progress = current_monotonic - self.last_progress_mono
319
+ self.max_stall_secs = max(time_since_progress, self.max_stall_secs)
320
+ if time_since_progress >= self.stall_timeout:
321
+ self.do_abort = True
322
+ self.exception = f"Stall detected: No progress for {time_since_progress:.1f} seconds"
323
+ return True
324
+
325
+ return False
326
+
327
+ def _check_for_slowdown(self, current_monotonic):
328
+ """Check for slowdown - called every 10 seconds"""
329
+ if self.slowdown_stop <= 0 or self.baseline_speed is None or self.baseline_speed <= 0:
330
+ return False
331
+
332
+ # Calculate current speed over last 30 seconds
333
+ floor = current_monotonic - 30
334
+ recent_history = [h for h in self.wr_hists if h.mono >= floor]
335
+
336
+ if len(recent_history) >= 2:
337
+ recent_start = recent_history[0]
338
+ recent_written = self.total_written - recent_start.written
339
+ recent_elapsed = current_monotonic - recent_start.mono
340
+
341
+ if recent_elapsed > 1.0:
342
+ current_speed = recent_written / recent_elapsed
343
+ self.baseline_speed = max(self.baseline_speed, current_speed)
344
+ slowdown_ratio = self.baseline_speed / max(current_speed, 1)
345
+ slowdown_ratio = int(round(slowdown_ratio, 0))
346
+ self.max_slowdown_ratio = max(self.max_slowdown_ratio, slowdown_ratio)
347
+
348
+ if slowdown_ratio > self.slowdown_stop:
349
+ self.do_abort = True
350
+ self.exception = (f"Slowdown abort: ({Utils.human(current_speed)}B/s)"
351
+ f" is 1/{slowdown_ratio} baseline")
352
+ return True
353
+
354
+ return False
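A worked example of the trigger condition with illustrative numbers:

```python
# Illustrative: baseline 120 MB/s measured in the first 60 s,
# current speed 6 MB/s over the 30 s sliding window.
baseline_speed = 120 * 1024**2
current_speed = 6 * 1024**2
slowdown_ratio = int(round(baseline_speed / max(current_speed, 1)))
print(slowdown_ratio)              # 20
print(slowdown_ratio > 16)         # True -> abort at the default slowdown_stop
```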
355
+
356
+ def _update_baseline_if_needed(self, current_monotonic):
357
+ """Update baseline speed measurement if still in first 60 seconds"""
358
+ if self.baseline_speed is not None:
359
+ return # Baseline already established
360
+
361
+ if (current_monotonic - self.start_mono) >= 60:
362
+ total_written_60s = self.total_written - self.resume_from
363
+ elapsed_60s = current_monotonic - self.start_mono
364
+ if elapsed_60s > 0:
365
+ self.baseline_speed = total_written_60s / elapsed_60s
366
+ self.baseline_end_mono = current_monotonic
367
+ self.last_slowdown_check = current_monotonic # Start slowdown checking
368
+
371
+ def get_status_str(self):
372
+ """Get human-readable status string"""
373
+ elapsed_time = time.monotonic() - self.start_mono
374
+ write_rate = self.total_written / elapsed_time if elapsed_time > 0 else 0
375
+ percent_complete = (self.total_written / (self.total_size * self.passes)) * 100
376
+ return (f"Write rate: {write_rate / (1024 * 1024):.2f} MB/s, "
377
+ f"Completed: {percent_complete:.2f}%")
378
+
379
+ def get_status(self):
380
+ """Get status tuple: (elapsed, percent, rate, eta)
381
+
382
+ Returns stats for current phase only:
383
+ - Write phase (0-100%): elapsed/rate/eta for writing
384
+ - Flushing phase: 100% FLUSH while kernel syncs to device
385
+ - Verify phase (v0-v100%): elapsed/rate/eta for verification only
386
+ """
387
+ pct_str, rate_str, when_str = '', '', ''
388
+ mono = time.monotonic()
389
+
390
+ if self.verify_phase:
391
+ # Verification phase: v0% to v100%
392
+ if self.verify_start_mono is None:
393
+ self.verify_start_mono = mono
394
+
395
+ elapsed_time = mono - self.verify_start_mono
396
+ progress = self.verify_progress
397
+
398
+ # Calculate total bytes to verify (verify_pct% of total_size)
399
+ if self.verify_pct > 0:
400
+ total_to_verify = self.total_size * self.verify_pct / 100
401
+ else:
402
+ total_to_verify = self.total_size
403
+
404
+ # Calculate verification percentage (0-100)
405
+ pct = int((progress / total_to_verify) * 100) if total_to_verify > 0 else 0
406
+ pct_str = f'v{pct}%'
407
+
408
+ if self.do_abort:
409
+ pct_str = 'STOP'
410
+
411
+ # Track verification progress for rate calculation
412
+ self.wr_hists.append(SimpleNamespace(mono=mono, written=progress))
413
+ floor = mono - 30
414
+ while len(self.wr_hists) >= 3 and self.wr_hists[1].mono <= floor: # drop entries fully outside the window
415
+ del self.wr_hists[0]
416
+ delta_mono = mono - self.wr_hists[0].mono
417
+ physical_rate = (progress - self.wr_hists[0].written) / delta_mono if delta_mono > 1.0 else 0
418
+ # Scale rate to show "effective" verification rate (as if verifying 100% of disk)
419
+ effective_rate = physical_rate * (100 / self.verify_pct) if self.verify_pct > 0 else physical_rate
420
+ rate_str = f'{Utils.human(int(round(effective_rate, 0)))}/s'
421
+
422
+ if physical_rate > 0:
423
+ remaining = total_to_verify - progress
424
+ when = int(round(remaining / physical_rate))
425
+ when_str = Utils.ago_str(when)
426
+ else:
427
+ when_str = '0'
428
+
429
+ return Utils.ago_str(int(round(elapsed_time))), pct_str, rate_str, when_str
430
+ else:
431
+ # Write phase: 0-100% (across all passes)
432
+ written = self.total_written
433
+ elapsed_time = mono - self.start_mono
434
+
435
+ # Calculate progress across all passes
436
+ # total_written represents cumulative bytes across all passes
437
+ total_work = self.total_size * self.passes
438
+ pct = (self.total_written / total_work) * 100 if total_work > 0 else 0
439
+ # Cap at 100% (can exceed if passes changed during resume)
440
+ pct = min(pct, 100)
441
+ pct_str = f'{int(round(pct))}%'
442
+ if self.do_abort:
443
+ pct_str = 'STOP'
444
+
445
+ # Calculate rate using sliding window to avoid RAM buffering inflation
446
+ # Track write progress history
447
+ self.wr_hists.append(SimpleNamespace(mono=mono, written=written))
448
+ floor = mono - 30 # 30 second window
449
+ while len(self.wr_hists) >= 3 and self.wr_hists[1].mono <= floor: # drop entries fully outside the window
450
+ del self.wr_hists[0]
451
+
452
+ # Calculate rate from sliding window
453
+ delta_mono = mono - self.wr_hists[0].mono
454
+ rate = (written - self.wr_hists[0].written) / delta_mono if delta_mono > 1.0 else 0
455
+
456
+ rate_str = f'{Utils.human(int(round(rate, 0)))}/s'
457
+
458
+ if rate > 0:
459
+ # ETA based on total remaining work across all passes
460
+ remaining_work = total_work - self.total_written
461
+ when = int(round(remaining_work / rate))
462
+ when_str = Utils.ago_str(when)
463
+ else:
464
+ when_str = '0'
465
+
466
+ return Utils.ago_str(int(round(elapsed_time))), pct_str, rate_str, when_str
467
+
468
+ def prep_marker_buffer(self, is_random, verify_status=None):
469
+ """Get the 1st 16KB to write:
470
+ - 15K zeros
471
+ - JSON status + zero fill to 1KB
472
+
473
+ Marker format (JSON):
474
+ - unixtime: Unix timestamp when marker was written
475
+ - scrubbed_bytes: Total bytes written (including all passes)
476
+ - size_bytes: Total device size in bytes
477
+ - passes: Number of passes intended/completed
478
+ - mode: 'Rand' or 'Zero' (final desired pattern)
479
+ - verify_status: 'pass', 'fail', or omitted (not verified)
480
+
481
+ Args:
482
+ is_random: bool, whether random data was written
483
+ verify_status: str, "pass", "fail", or None (not verified)
484
+ """
485
+ data = {"unixtime": int(time.time()),
486
+ "scrubbed_bytes": self.total_written,
487
+ "size_bytes": self.total_size,
488
+ "passes": self.passes,
489
+ "mode": 'Rand' if is_random else 'Zero'
490
+ }
491
+ if verify_status is not None:
492
+ data["verify_status"] = verify_status
493
+ json_data = json.dumps(data).encode('utf-8')
494
+ buffer = bytearray(self.MARKER_SIZE) # Only 16KB, not 1MB
495
+ buffer[:self.STATE_OFFSET] = b'\x00' * self.STATE_OFFSET
496
+ buffer[self.STATE_OFFSET:self.STATE_OFFSET + len(json_data)] = json_data
497
+ remaining_size = self.MARKER_SIZE - (self.STATE_OFFSET + len(json_data))
498
+ buffer[self.STATE_OFFSET + len(json_data):] = b'\x00' * remaining_size
499
+ return buffer
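A round-trip sketch (placeholder device path; nothing is written to disk) showing that the JSON status can be recovered from the tail of the marker buffer:

```python
import json
from types import SimpleNamespace
from dwipe.WipeJob import WipeJob

job = WipeJob('/dev/sdX', 500 * 10**9, SimpleNamespace(dry_run=True, passes=2))
buf = job.prep_marker_buffer(is_random=True)
meta = json.loads(bytes(buf[WipeJob.STATE_OFFSET:]).rstrip(b'\x00'))
print(meta['mode'], meta['passes'], meta['size_bytes'])   # Rand 2 500000000000
```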
500
+
501
+ def get_pass_pattern(self, pass_number, desired_mode):
502
+ """Determine what pattern to write for a given pass
503
+
504
+ For multi-pass (>1), alternates patterns ending on desired:
505
+ - 4-pass Rand: Zero, Rand, Zero, Rand
506
+ - 4-pass Zero: Rand, Zero, Rand, Zero
507
+ - 2-pass Rand: Zero, Rand
508
+ - 2-pass Zero: Rand, Zero
509
+
510
+ Args:
511
+ pass_number: 0-indexed pass number
512
+ desired_mode: 'Rand' or 'Zero' - the final desired pattern
513
+
514
+ Returns:
515
+ bool: True for random, False for zeros
516
+ """
517
+ if self.passes == 1:
518
+ # Single pass: just write desired pattern
519
+ return desired_mode == 'Rand'
520
+
521
+ # Multi-pass: alternate patterns, ending on desired
522
+ # Final pass is always desired pattern
523
+ if pass_number == self.passes - 1:
524
+ return desired_mode == 'Rand'
525
+
526
+ # Earlier passes: alternate, starting with opposite
527
+ # If desired is Rand: pass 0=Zero, 1=Rand, 2=Zero, 3=Rand
528
+ # If desired is Zero: pass 0=Rand, 1=Zero, 2=Rand, 3=Zero
529
+ if desired_mode == 'Rand':
530
+ # Even passes (0, 2, ...) = Zero, odd (1, 3, ...) = Rand
531
+ return pass_number % 2 == 1
532
+ else:
533
+ # Even passes (0, 2, ...) = Rand, odd (1, 3, ...) = Zero
534
+ return pass_number % 2 == 0
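A sketch of the resulting schedule (placeholder arguments; `True` means a random pass):

```python
from types import SimpleNamespace
from dwipe.WipeJob import WipeJob

job = WipeJob('/dev/sdX', 10 * 2**20, SimpleNamespace(dry_run=True, passes=4))
print([job.get_pass_pattern(p, 'Rand') for p in range(4)])
# [False, True, False, True]  i.e. Zero, Rand, Zero, Rand
print([job.get_pass_pattern(p, 'Zero') for p in range(4)])
# [True, False, True, False]  i.e. Rand, Zero, Rand, Zero
```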
535
+
536
+ def maybe_update_marker(self, is_random):
537
+ """Periodically update marker to enable crash recovery
538
+
539
+ Updates marker every marker_update_interval seconds (default 30s).
540
+ This allows resume to work even after crashes, power loss, or kill -9.
541
+
542
+ Args:
543
+ is_random: bool, whether random data is being written
544
+
545
+ Returns:
546
+ None
547
+ """
548
+ now_mono = time.monotonic()
549
+ if now_mono - self.last_marker_update_mono < self.marker_update_interval:
550
+ return # Not time yet
551
+
552
+ # Marker writes use separate file handle (buffered I/O, not O_DIRECT)
553
+ # because marker buffer is not aligned
554
+ try:
555
+ with open(self.device_path, 'r+b') as marker_file:
556
+ marker_file.seek(0)
557
+ marker_file.write(self.prep_marker_buffer(is_random))
558
+ self.last_marker_update_mono = now_mono
559
+ except Exception:
560
+ # If marker update fails, just continue - we'll try again in 30s
561
+ pass
562
+
563
+ @staticmethod
564
+ def detect_pattern_on_disk(device_path, sample_size=16*1024):
565
+ """Read a sample from disk after header to detect zeros vs random
566
+
567
+ Reads sample_size bytes starting at WRITE_SIZE (after the marker area).
568
+ Used for smart resume to determine if we're in the middle of a Zero or Rand pass.
569
+
570
+ Args:
571
+ device_path: Path to device (e.g., '/dev/sda1')
572
+ sample_size: Number of bytes to sample (default 16KB)
573
+
574
+ Returns:
575
+ bool: True if random pattern detected, False if zeros detected
576
+ """
577
+ try:
578
+ with open(device_path, 'rb') as device:
579
+ # Skip the first WRITE_SIZE bytes (1 MB), well past the 16 KB marker area
580
+ device.seek(WipeJob.WRITE_SIZE)
581
+ data = device.read(sample_size)
582
+
583
+ if not data:
584
+ return False # Can't read, assume zeros
585
+
586
+ # Check if all zeros
587
+ non_zero_count = sum(1 for byte in data if byte != 0)
588
+ # If less than 1% non-zero bytes, consider it zeros
589
+ if non_zero_count < len(data) * 0.01:
590
+ return False # Zeros detected
591
+
592
+ # Otherwise, assume random
593
+ return True
594
+ except Exception:
595
+ return False # Error reading, assume zeros
596
+
597
+ def _get_device_major_minor(self):
598
+ """Get major:minor device numbers for the device
599
+
600
+ Returns:
601
+ str: "major:minor" or None if unable to determine
602
+ """
603
+ try:
604
+ stat_info = os.stat(self.device_path)
605
+ major = os.major(stat_info.st_rdev)
606
+ minor = os.minor(stat_info.st_rdev)
607
+ return f"{major}:{minor}"
608
+ except Exception:
609
+ return None
610
+
611
+ def _setup_ionice(self):
612
+ """Setup I/O priority to best-effort class, lowest priority"""
613
+ try:
614
+ # Class 2 = best-effort, priority 7 = lowest (0 is highest, 7 is lowest)
615
+ subprocess.run(["ionice", "-c", "2", "-n", "7", "-p", str(os.getpid())],
616
+ capture_output=True, check=False)
617
+ except Exception:
618
+ pass
619
+
620
+ @staticmethod
621
+ def read_marker_buffer(device_name):
622
+ """Open the device and read the first 16 KB"""
623
+ try:
624
+ with open(f'/dev/{device_name}', 'rb') as device:
625
+ device.seek(0)
626
+ buffer = device.read(WipeJob.MARKER_SIZE)
627
+ except Exception:
628
+ return None # cannot find info
629
+
630
+ if buffer[:WipeJob.STATE_OFFSET] != b'\x00' * (WipeJob.STATE_OFFSET):
631
+ return None # First 15 KB are not zeros
632
+
633
+ # Extract the JSON data from the final 1 KB and strip trailing zeros
634
+ json_data_bytes = buffer[WipeJob.STATE_OFFSET:WipeJob.MARKER_SIZE].rstrip(b'\x00')
635
+
636
+ if not json_data_bytes:
637
+ return None # No JSON data found
638
+
639
+ # Deserialize the JSON data
640
+ try:
641
+ data = json.loads(json_data_bytes.decode('utf-8'))
642
+ except Exception: # includes json.JSONDecodeError
643
+ return None # Invalid JSON data!
644
+
645
+ rv = {}
646
+ for key, value in data.items():
647
+ if key in ('unixtime', 'scrubbed_bytes', 'size_bytes', 'passes') and isinstance(value, int):
648
+ rv[key] = value
649
+ elif key in ('mode', 'verify_status') and isinstance(value, str):
650
+ rv[key] = value
651
+ else:
652
+ return None # bogus data
653
+ # Old markers: 4 fields (no passes, no verify_status)
654
+ # New markers: 5 fields minimum (with passes), 6 with verify_status
655
+ if len(rv) < 4 or len(rv) > 6:
656
+ return None # bogus data
657
+ return SimpleNamespace(**rv)
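Hypothetical inspection of a device's marker; `sdb` is a placeholder and requires read permission on the device node:

```python
from dwipe.WipeJob import WipeJob

info = WipeJob.read_marker_buffer('sdb')
if info:
    target = info.size_bytes * getattr(info, 'passes', 1)
    print(f"{info.mode} wipe, {100 * info.scrubbed_bytes / target:.1f}% done,"
          f" verified: {getattr(info, 'verify_status', 'n/a')}")
```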
658
+
659
+ def write_partition(self):
660
+ """Writes random chunks to a device and updates the progress status.
661
+
662
+ Performs multiple passes if self.passes > 1 with alternating patterns:
663
+ - Multi-pass Rand: Zero, Rand, Zero, Rand (ends on Rand)
664
+ - Multi-pass Zero: Rand, Zero, Rand, Zero (ends on Zero)
665
+ If verify_pct > 0, automatically starts verification after write completes.
666
+ Supports resuming from a previous stopped wipe with current passes setting.
667
+ """
668
+ # Use resume_mode if set (from existing marker), otherwise current mode
669
+ mode_to_use = self.resume_mode if self.resume_mode else self.opts.wipe_mode.replace('+V', '')
670
+ desired_mode = mode_to_use # 'Rand' or 'Zero' - the final desired pattern
671
+ self.expected_pattern = "random" if desired_mode == 'Rand' else "zeroed"
672
+
673
+ # Calculate target bytes based on current passes setting
674
+ target_bytes = self.passes * self.total_size
675
+
676
+ # If already at or beyond target (user reduced passes), mark as done
677
+ if self.total_written >= target_bytes:
678
+ self.done = True
679
+ return
680
+
681
+ try:
682
+ # Set low I/O priority to be nice to other system processes
683
+ self._setup_ionice()
684
+
685
+ # Open device with O_DIRECT for unbuffered I/O (bypasses page cache)
686
+ # O_DIRECT gives maximum performance with zero dirty pages
687
+ if not self.opts.dry_run:
688
+ fd = os.open(self.device_path, os.O_WRONLY | os.O_DIRECT)
689
+ else:
690
+ fd = None
691
+
692
+ try:
693
+ # Continue writing until we reach target_bytes
694
+ while self.total_written < target_bytes and not self.do_abort:
695
+ # Calculate current pass and offset within pass
696
+ self.current_pass = self.total_written // self.total_size
697
+ offset_in_pass = self.total_written % self.total_size
698
+
699
+ # SKIP MARKER AREA - don't overwrite it!
700
+ if offset_in_pass < self.MARKER_SIZE:
701
+ self.total_written += self.MARKER_SIZE - offset_in_pass
702
+ offset_in_pass = self.MARKER_SIZE
703
+
704
+ # Determine pattern for this pass (alternating for multi-pass)
705
+ is_random_pass = self.get_pass_pattern(self.current_pass, desired_mode)
706
+
707
+ # Seek to current position (O_DIRECT requires block-aligned seeks)
708
+ if not self.opts.dry_run:
709
+ os.lseek(fd, offset_in_pass, os.SEEK_SET)
710
+
711
+ # Write until end of current pass or target_bytes, whichever comes first
712
+ pass_remaining = self.total_size - offset_in_pass
713
+ total_remaining = target_bytes - self.total_written
714
+ bytes_to_write_this_pass = min(pass_remaining, total_remaining)
715
+
716
+ pass_bytes_written = 0
717
+
718
+ while pass_bytes_written < bytes_to_write_this_pass and not self.do_abort:
719
+ current_mono = time.monotonic()
720
+
721
+ # Update baseline if needed (first 60 seconds)
722
+ self._update_baseline_if_needed(current_mono)
723
+
724
+ # Check for stall (frequently)
725
+ if self._check_for_stall(current_mono):
726
+ break
727
+
728
+ # Check for slowdown (every 10 seconds)
729
+ if self.baseline_speed is not None:
730
+ time_since_last_check = current_mono - self.last_slowdown_check
731
+ if time_since_last_check >= 10:
732
+ if self._check_for_slowdown(current_mono):
733
+ break
734
+ self.last_slowdown_check = current_mono
735
+
736
+ # Update progress tracking
737
+ if self.total_written > self.last_progress_written:
738
+ self.last_progress_mono = current_mono
739
+
740
+
741
+ # Calculate chunk size (must be block-aligned for O_DIRECT)
742
+ remaining = bytes_to_write_this_pass - pass_bytes_written
743
+ chunk_size = min(WipeJob.WRITE_SIZE, remaining)
744
+ # Round down to block boundary
745
+ chunk_size = (chunk_size // WipeJob.BLOCK_SIZE) * WipeJob.BLOCK_SIZE
746
+ if chunk_size == 0:
747
+ break
748
+
749
+ # Select buffer based on pass type
750
+ if is_random_pass:
751
+ # Use slice of random buffer (still aligned via memoryview)
752
+ chunk = WipeJob.buffer[:chunk_size]
753
+ else:
754
+ # Use zero buffer
755
+ chunk = WipeJob.zero_buffer[:chunk_size]
756
+
757
+ if self.opts.dry_run:
758
+ bytes_written = chunk_size
759
+ time.sleep(0.001)
760
+ else:
761
+ try:
762
+ # Write with O_DIRECT (bypasses page cache)
763
+ bytes_written, fd = self.safe_write(fd, chunk)
764
+ except Exception as e:
765
+ # Save exception for debugging
766
+ self.exception = str(e)
767
+ self.do_abort = True
768
+ bytes_written = 0
769
+
770
+ self.total_written += bytes_written
771
+ pass_bytes_written += bytes_written
772
+
773
+ # Periodically update marker for crash recovery (every 30s)
774
+ # Note: marker writes use separate buffered file handle
775
+ if not self.opts.dry_run and self.total_written > self.MARKER_SIZE:
776
+ marker_is_random = (desired_mode == 'Rand')
777
+ self.maybe_update_marker(marker_is_random)
778
+
779
+ # Check for errors or incomplete writes
780
+ if bytes_written < chunk_size:
781
+ break
782
+
783
+ # O_DIRECT has no dirty pages - close is instant
784
+ finally:
785
+ # Close device file descriptor
786
+ if fd is not None:
787
+ os.close(fd)
788
+
789
+ # Write final marker buffer at beginning after ALL passes complete
790
+ # Skip marker write on abort to avoid blocking on problematic devices
791
+ # Use separate buffered file handle (marker is not O_DIRECT aligned)
792
+ if not self.opts.dry_run and self.total_written > 0 and not self.do_abort:
793
+ try:
794
+ final_is_random = (desired_mode == 'Rand')
795
+ with open(self.device_path, 'r+b') as marker_file:
796
+ marker_file.seek(0)
797
+ marker_file.write(self.prep_marker_buffer(final_is_random))
798
+ except Exception:
799
+ pass # Marker write failure shouldn't fail the whole job
800
+
801
+ # Auto-start verification if enabled and write completed successfully
802
+ verify_pct = getattr(self.opts, 'verify_pct', 0)
803
+ auto_verify = getattr(self.opts, 'wipe_mode', "").endswith('+V')
804
+ if auto_verify and verify_pct > 0 and not self.do_abort and not self.exception:
805
+ self.verify_partition(verify_pct)
806
+ # Write marker with verification status after verification completes
807
+ # Use desired_mode to determine if random or zero
808
+ is_random = (desired_mode == 'Rand')
809
+ self._write_marker_with_verify_status(is_random)
810
+ else:
811
+ self.done = True
812
+ except Exception:
813
+ self.exception = traceback.format_exc()
814
+ finally:
815
+ # ALWAYS ensure job is marked as done, even if exception or early return
816
+ if not self.done:
817
+ self.done = True
818
+
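A hypothetical end-to-end dry run (placeholder device; with `dry_run=True` the device is never opened or written):

```python
import time
from types import SimpleNamespace
from dwipe.WipeJob import WipeJob

opts = SimpleNamespace(dry_run=True, wipe_mode='Zero', passes=1, verify_pct=0)
job = WipeJob.start_job('/dev/sdX', 64 * 1024**2, opts)
while not job.done:
    time.sleep(0.2)
print(job.get_status())   # (elapsed, '100%', rate, eta) once finished
```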
819
+ def safe_write(self, fd, chunk):
820
+ """Safe write with error recovery.
821
+
822
+ Returns:
+ tuple: (bytes_written, fd) on success; fd may be a new
+ descriptor if the device was reopened after a write error
827
+
828
+ Raises:
829
+ Exception: If should abort (too many consecutive errors)
830
+ """
831
+ consecutive_errors = 0
832
+ while True: # Keep trying until success, skip, or abort
833
+ try:
834
+ bytes_written = os.write(fd, chunk)
835
+ self.reopen_count = 0
836
+ return bytes_written, fd # success
837
+
838
+ except Exception as e:
839
+ consecutive_errors += 1
840
+ self.total_errors += 1
841
+
842
+ # Check if we should abort
843
+ if consecutive_errors >= self.max_consecutive_errors:
844
+ raise Exception(f"{consecutive_errors} consecutive write errors") from e
845
+
846
+ if self.total_errors >= self.max_total_errors:
847
+ raise Exception(f"{self.total_errors} total write errors") from e
848
+
849
+ # Not fatal yet - try reopening if enabled
850
+ if self.reopen_on_error:
851
+ try:
852
+ current_pos = self.total_written
853
+ # Open new fd first
854
+ new_fd = os.open(self.device_path, os.O_WRONLY | os.O_DIRECT)
855
+ try:
856
+ # Seek to correct position on new fd
857
+ os.lseek(new_fd, current_pos, os.SEEK_SET)
858
+ # Only close old fd after new one is ready
859
+ old_fd = fd
860
+ fd = new_fd
861
+ try:
862
+ os.close(old_fd)
863
+ except Exception:
864
+ pass # Old fd close failed, but new fd is good
865
+ self.reopen_count += 1
866
+ except Exception:
867
+ # New fd setup failed, close it and keep using old fd
868
+ os.close(new_fd)
869
+ raise
870
+ except Exception:
871
+ # Reopen failed - count as another error and retry with old fd
872
+ self.total_errors += 1
873
+
874
+ # Retry the write (continue loop)
875
+
876
+ def verify_partition(self, verify_pct):
877
+ """Verify partition with section-by-section analysis"""
878
+ # Initialize verify state
879
+ if not self.verify_phase:
880
+ self.verify_pct = verify_pct
881
+ self.verify_start_mono = time.monotonic()
882
+ self.verify_progress = 0
883
+ self.wr_hists = []
884
+ self.wr_hists.append(SimpleNamespace(mono=self.verify_start_mono, written=0))
885
+ self.verify_phase = True
886
+
887
+ if verify_pct == 0:
888
+ self.verify_result = "skipped"
889
+ return
890
+
891
+ # Fast-fail for zeros
892
+ fast_fail_zeros = (self.expected_pattern == "zeroed")
893
+
894
+ # For unmarked disks: track if ALL bytes are zero
895
+ all_zeros = (self.expected_pattern is None)
896
+
897
+ # Track section results for debugging
898
+ self.section_results = [] # Store (section_idx, result, stats)
899
+
900
+ fd = None
+ try:
+ # Open with regular buffered I/O
+ if not self.opts.dry_run:
+ fd = os.open(self.device_path, os.O_RDONLY)
906
+
907
+ read_chunk_size = 64 * 1024 # 64KB chunks
908
+ SAMPLE_STEP = 23 # Sample every 23rd byte (~4% of data) - prime for even distribution
909
+
910
+ # Skip the first BUFFER_SIZE bytes (1 MB), conservatively past the 16 KB marker
911
+ marker_skip = WipeJob.BUFFER_SIZE
912
+ usable_size = self.total_size - marker_skip
913
+
914
+ # Divide disk into 100 sections for sampling
915
+ num_sections = 100
916
+ section_size = usable_size // num_sections
917
+
918
+ # Pre-allocated zero pattern for fast comparison
919
+ ZERO_PATTERN_64K = b'\x00' * (64 * 1024)
920
+
921
+ # Track if any section failed
922
+ overall_failed = False
923
+ failure_reason = ""
924
+
925
+ for section_idx in range(num_sections):
926
+ if self.do_abort or overall_failed:
927
+ break
928
+
929
+ # Reset analysis for THIS SECTION
930
+ section_byte_counts = [0] * 256
931
+ section_samples = 0
932
+ section_found_nonzero = False
933
+
934
+ # Calculate bytes to verify in this section
935
+ bytes_in_section = min(section_size, usable_size - section_idx * section_size)
936
+ bytes_to_verify = int(bytes_in_section * verify_pct / 100)
937
+
938
+ if bytes_to_verify == 0:
939
+ self.section_results.append((section_idx, "skipped", {}))
940
+ continue
941
+
942
+ # Random offset within section
943
+ if bytes_to_verify < bytes_in_section:
944
+ offset_in_section = random.randint(0, bytes_in_section - bytes_to_verify)
945
+ else:
946
+ offset_in_section = 0
947
+
948
+ read_pos = marker_skip + (section_idx * section_size) + offset_in_section
949
+ verified_in_section = 0
950
+
951
+ # Seek to position in this section
952
+ if not self.opts.dry_run:
953
+ os.lseek(fd, read_pos, os.SEEK_SET)
954
+
955
+ # Read and analyze THIS SECTION
956
+ while verified_in_section < bytes_to_verify:
957
+ if self.do_abort:
958
+ break
959
+
960
+ chunk_size = min(read_chunk_size, bytes_to_verify - verified_in_section)
961
+
962
+ if self.opts.dry_run:
963
+ time.sleep(0.01)
964
+ data = b'\x00' * chunk_size
965
+ else:
966
+ data = os.read(fd, chunk_size)
967
+ if not data:
968
+ break
969
+
970
+ # --------------------------------------------------
971
+ # SECTION ANALYSIS
972
+ # --------------------------------------------------
973
+
974
+ # FAST zero check for zeroed pattern
975
+ if fast_fail_zeros:
976
+ # Ultra-fast: compare against pre-allocated zero pattern
977
+ if memoryview(data) != ZERO_PATTERN_64K[:len(data)]:
978
+ failed_offset = read_pos + verified_in_section
979
+ overall_failed = True
980
+ failure_reason = f"non-zero at {Utils.human(failed_offset)}"
981
+ break
982
+
983
+ # FAST check for unmarked disks (looking for all zeros)
984
+ if all_zeros and not section_found_nonzero:
985
+ # Fast check: use bytes.count() which is C-optimized
986
+ if data.count(0) != len(data):
987
+ section_found_nonzero = True
988
+
989
+ # RANDOM pattern analysis (always collect data for analysis)
990
+ # Use memoryview for fast slicing
991
+ mv = memoryview(data)
992
+ data_len = len(data)
993
+
994
+ # Sample every SAMPLE_STEP-th byte
995
+ for i in range(0, data_len, SAMPLE_STEP):
996
+ section_byte_counts[mv[i]] += 1
997
+ section_samples += 1
998
+
999
+ # --------------------------------------------------
1000
+ # END SECTION ANALYSIS
1001
+ # --------------------------------------------------
1002
+
1003
+ verified_in_section += len(data)
1004
+ self.verify_progress += len(data) # Track actual bytes read for progress
1005
+
1006
+ # After reading section, analyze it
1007
+ if overall_failed:
1008
+ break
1009
+
1010
+ # Determine section result
1011
+ if fast_fail_zeros:
1012
+ # Already passed zero check if we got here
1013
+ section_result = "zeroed"
1014
+ section_stats = {}
1015
+
1016
+ elif all_zeros:
1017
+ if not section_found_nonzero:
1018
+ section_result = "zeroed"
1019
+ section_stats = {}
1020
+ else:
1021
+ # Need to check if it's random
1022
+ section_result, section_stats = self._analyze_section_randomness(
1023
+ section_byte_counts, section_samples
1024
+ )
1025
+
1026
+ else: # Expected random
1027
+ section_result, section_stats = self._analyze_section_randomness(
1028
+ section_byte_counts, section_samples
1029
+ )
1030
+
1031
+ # Store section result
1032
+ self.section_results.append((section_idx, section_result, section_stats))
1033
+
1034
+ # Check if section failed
1035
+ if (self.expected_pattern == "random" and section_result != "random") or \
1036
+ (self.expected_pattern == "zeroed" and section_result != "zeroed") or \
1037
+ (self.expected_pattern is None and section_result == "not-wiped"):
1038
+
1039
+ overall_failed = True
1040
+ failure_reason = f"section {section_idx}: {section_result}"
1041
+ break
1042
+
1043
+ # Close file descriptor
1044
+ if fd is not None:
1045
+ os.close(fd)
+ fd = None # Prevent a double close in the finally below
1046
+
1047
+ # Determine overall result
1048
+ if overall_failed:
1049
+ if self.expected_pattern == "zeroed":
1050
+ self.verify_result = f"not-wiped ({failure_reason})"
1051
+ elif self.expected_pattern == "random":
1052
+ self.verify_result = f"not-wiped ({failure_reason})"
1053
+ else: # unmarked
1054
+ # Count section results
1055
+ zeroed_sections = sum(1 for _, result, _ in self.section_results if result == "zeroed")
1056
+ random_sections = sum(1 for _, result, _ in self.section_results if result == "random")
1057
+ total_checked = len([r for _, r, _ in self.section_results if r != "skipped"])
1058
+
1059
+ if zeroed_sections == total_checked:
1060
+ self.verify_result = "zeroed"
1061
+ self.expected_pattern = "zeroed"
1062
+ elif random_sections == total_checked:
1063
+ self.verify_result = "random"
1064
+ self.expected_pattern = "random"
1065
+ else:
1066
+ self.verify_result = f"mixed ({failure_reason})"
1067
+ else:
1068
+ # All sections passed
1069
+ if self.expected_pattern == "zeroed":
1070
+ self.verify_result = "zeroed"
1071
+ elif self.expected_pattern == "random":
1072
+ self.verify_result = "random"
1073
+ else: # unmarked
1074
+ # Determine from section consensus
1075
+ zeroed_sections = sum(1 for _, result, _ in self.section_results if result == "zeroed")
1076
+ random_sections = sum(1 for _, result, _ in self.section_results if result == "random")
1077
+
1078
+ if zeroed_sections > random_sections:
1079
+ self.verify_result = "zeroed"
1080
+ self.expected_pattern = "zeroed"
1081
+ else:
1082
+ self.verify_result = "random"
1083
+ self.expected_pattern = "random"
1084
+
1085
+ except Exception:
1086
+ self.exception = traceback.format_exc()
1087
+ self.verify_result = "error"
+ finally:
+ # Ensure the device fd is closed even if verification raised mid-read
+ if fd is not None:
+ os.close(fd)
1088
+
1094
+ def _analyze_section_randomness(self, byte_counts, total_samples):
1095
+ """Analyze if a section appears random"""
1096
+ if total_samples < 100:
1097
+ return "insufficient-data", {"samples": total_samples}
1098
+
1099
+ # Calculate statistics
1100
+ max_count = max(byte_counts)
1101
+ max_freq = max_count / total_samples
1102
+
1103
+ # Count unique bytes seen
1104
+ unique_bytes = sum(1 for count in byte_counts if count > 0)
1105
+
1106
+ # Count completely unused bytes
1107
+ unused_bytes = sum(1 for count in byte_counts if count == 0)
1108
+
1109
+ # Calculate expected frequency and variance
1110
+ expected = total_samples / 256
1111
+ if expected > 0:
1112
+ # Coefficient of variation (measure of dispersion)
1113
+ variance = sum((count - expected) ** 2 for count in byte_counts) / 256
1114
+ std_dev = variance ** 0.5
1115
+ cv = std_dev / expected
1116
+ else:
1117
+ cv = float('inf')
1118
+
1119
+ # Decision logic for "random"
1120
+ # Good random data should:
1121
+ # 1. Use most byte values (>200 unique)
1122
+ # 2. No single byte dominates (<2% frequency)
1123
+ # 3. Relatively even distribution (CV < 2.0)
1124
+ # 4. Not too many zeros (if it's supposed to be random, not zeroed)
1125
+
1126
+ is_random = (unique_bytes > 200 and # >78% of bytes used
1127
+ max_freq < 0.02 and # No byte > 2%
1128
+ cv < 2.0 and # Not too lumpy
1129
+ byte_counts[0] / total_samples < 0.5) # Not mostly zeros
1130
+
1131
+ stats = {
1132
+ "samples": total_samples,
1133
+ "max_freq": max_freq,
1134
+ "unique_bytes": unique_bytes,
1135
+ "unused_bytes": unused_bytes,
1136
+ "cv": cv,
1137
+ "zero_freq": byte_counts[0] / total_samples if total_samples > 0 else 0
1138
+ }
1139
+
1140
+ if is_random:
1141
+ return "random", stats
1142
+ else:
1143
+ # Check if it's zeros
1144
+ if byte_counts[0] / total_samples > 0.95:
1145
+ return "zeroed", stats
1146
+ else:
1147
+ return "not-wiped", stats
1148
+
1156
+ def _write_marker_with_verify_status(self, is_random):
1157
+ """Write marker buffer with verification status if verification was performed
1158
+
1159
+ Writes marker if:
1160
+ 1. Verification was performed (verify_result is set)
1161
+ 2. Verification status would change from current marker, OR
1162
+ 3. No marker exists but verify passed (unmarked disk that's all zeros/random)
1163
+ 4. Verification was not aborted
1164
+
1165
+ Args:
1166
+ is_random: bool, whether random data was written (or None to infer from verify_result)
1167
+ """
1168
+ try:
1169
+ if not self.verify_result or self.verify_result == "skipped" or self.do_abort:
1170
+ return
1171
+
1172
+ # Determine verify_status based on verify_result (may include debug info)
1173
+ verify_result_base = self.verify_result.split(' ')[0] # e.g. "mixed (reason)" -> "mixed"
1174
+
1175
+ if self.expected_pattern == "random" and verify_result_base == "random":
1176
+ new_verify_status = "pass"
1177
+ elif self.expected_pattern == "zeroed" and verify_result_base == "zeroed":
1178
+ new_verify_status = "pass"
1179
+ elif verify_result_base in ("not-wiped", "mixed", "error"):
1180
+ new_verify_status = "fail"
1181
+ else:
1182
+ # Mismatch: expected one pattern but got another
1183
+ new_verify_status = "fail"
1184
+
1185
+ # Read existing marker to check if verify_status would change
1186
+ device_name = os.path.basename(self.device_path)
1187
+ existing_marker = WipeJob.read_marker_buffer(device_name)
1188
+ existing_verify_status = (getattr(existing_marker, 'verify_status', None)
1189
+ if existing_marker else None)
1190
+
1191
+ # For unmarked disks that verified successfully, infer the mode from verify_result
1192
+ if not existing_marker and new_verify_status == "pass":
1193
+ if verify_result_base == "random":
1194
+ is_random = True
1195
+ self.total_written = self.total_size # Mark as fully wiped
1196
+ elif verify_result_base == "zeroed":
1197
+ is_random = False
1198
+ self.total_written = self.total_size # Mark as fully wiped
1199
+ # Write marker for this previously unmarked disk
1200
+ elif existing_marker:
1201
+ # Only write if verify status changed
1202
+ if existing_verify_status == new_verify_status:
1203
+ return
1204
+ # Preserve original scrubbed_bytes if this is a verify-only job
1205
+ if self.total_written == 0:
1206
+ self.total_written = existing_marker.scrubbed_bytes
1207
+ else:
1208
+ # No marker and verify failed - don't write marker
1209
+ return
1210
+
1211
+ # Write marker with verification status
1212
+ if not self.opts.dry_run:
1213
+ with open(self.device_path, 'r+b') as device:
1214
+ device.seek(0)
1215
+ marker_buffer = self.prep_marker_buffer(is_random,
1216
+ verify_status=new_verify_status)
1217
+ device.write(marker_buffer)
1218
+
1219
+ except Exception:
1220
+ # Catch ANY exception in this method to ensure self.done is always set
1221
+ self.exception = traceback.format_exc()
1222
+ finally:
1223
+ # ALWAYS set done, even if there was an exception
1224
+ self.done = True
1225
+
1226
+
1227
+ # Initialize the class-level buffers with mmap for O_DIRECT alignment
1228
+ if WipeJob.buffer is None:
1229
+ # Allocate random buffer with mmap (page-aligned for O_DIRECT)
1230
+ WipeJob.buffer_mem = mmap.mmap(-1, WipeJob.BUFFER_SIZE,
1231
+ flags=mmap.MAP_PRIVATE | mmap.MAP_ANONYMOUS)
1232
+ raw_buffer = os.urandom(WipeJob.BUFFER_SIZE)
1233
+ rebalanced = WipeJob._rebalance_buffer(raw_buffer)
1234
+ WipeJob.buffer_mem.write(rebalanced)
1235
+ WipeJob.buffer_mem.seek(0)
1236
+ WipeJob.buffer = memoryview(WipeJob.buffer_mem)
1237
+
1238
+ # Allocate zero buffer with mmap
1239
+ WipeJob.zero_buffer_mem = mmap.mmap(-1, WipeJob.WRITE_SIZE,
1240
+ flags=mmap.MAP_PRIVATE | mmap.MAP_ANONYMOUS)
1241
+ WipeJob.zero_buffer_mem.write(b'\x00' * WipeJob.WRITE_SIZE)
1242
+ WipeJob.zero_buffer_mem.seek(0)
1243
+ WipeJob.zero_buffer = memoryview(WipeJob.zero_buffer_mem)
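A sketch of how one could confirm the alignment property this relies on: anonymous mmap regions are page-aligned, which satisfies O_DIRECT's 4096-byte requirement (the ctypes trick below just reads the buffer's address):

```python
import ctypes
from dwipe.WipeJob import WipeJob

addr = ctypes.addressof(ctypes.c_char.from_buffer(WipeJob.buffer_mem))
assert addr % WipeJob.BLOCK_SIZE == 0   # page-aligned, safe for O_DIRECT
```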