dwipe-2.0.0-py3-none-any.whl → dwipe-2.0.2-py3-none-any.whl
- dwipe/DeviceInfo.py +291 -59
- dwipe/DiskWipe.py +497 -172
- dwipe/DrivePreChecker.py +90 -0
- dwipe/FirmwareWipeTask.py +370 -0
- dwipe/LsblkMonitor.py +124 -0
- dwipe/PersistentState.py +28 -18
- dwipe/Prereqs.py +84 -0
- dwipe/StructuredLogger.py +643 -0
- dwipe/ToolManager.py +618 -0
- dwipe/Utils.py +108 -0
- dwipe/VerifyTask.py +410 -0
- dwipe/WipeJob.py +613 -165
- dwipe/WipeTask.py +148 -0
- dwipe/WriteTask.py +402 -0
- dwipe/main.py +14 -9
- {dwipe-2.0.0.dist-info → dwipe-2.0.2.dist-info}/METADATA +69 -30
- dwipe-2.0.2.dist-info/RECORD +21 -0
- dwipe/WipeJobFuture.py +0 -245
- dwipe-2.0.0.dist-info/RECORD +0 -13
- {dwipe-2.0.0.dist-info → dwipe-2.0.2.dist-info}/WHEEL +0 -0
- {dwipe-2.0.0.dist-info → dwipe-2.0.2.dist-info}/entry_points.txt +0 -0
- {dwipe-2.0.0.dist-info → dwipe-2.0.2.dist-info}/licenses/LICENSE +0 -0
dwipe/WipeJob.py
CHANGED
@@ -1,6 +1,7 @@
 """
 WipeJob class for handling disk/partition wiping operations

+Orchestrates task sequences (write and verify operations).
 """
 # pylint: disable=broad-exception-raised,broad-exception-caught
 import os
@@ -14,25 +15,17 @@ import mmap
 from types import SimpleNamespace

 from .Utils import Utils
+from .WipeTask import WipeTask
+from .WriteTask import WriteTask, WriteZeroTask, WriteRandTask
+from .VerifyTask import VerifyTask, VerifyZeroTask, VerifyRandTask


 class WipeJob:
-"""Handles disk/partition wiping operations with progress tracking
+"""Handles disk/partition wiping operations with progress tracking

-
-
-
-BUFFER_SIZE = WRITE_SIZE # Same size for O_DIRECT
-
-# Marker constants (separate from O_DIRECT writes)
-MARKER_SIZE = 16 * 1024 # 16KB for marker
-STATE_OFFSET = 15 * 1024 # where json is written (for marker buffer)
-
-# Aligned buffers allocated with mmap (initialized at module load)
-buffer = None # Random data buffer (memoryview)
-buffer_mem = None # Underlying mmap object
-zero_buffer = None # Zero buffer (memoryview)
-zero_buffer_mem = None # Underlying mmap object
+Note: Constants and buffers are now defined in WipeTask base class.
+WipeJob uses WipeTask.BLOCK_SIZE, WipeTask.WRITE_SIZE, WipeTask.buffer, etc.
+"""

 @staticmethod
 def _get_dirty_kb():
@@ -124,26 +117,40 @@ class WipeJob:

 return bytes(result)

-def __init__(self, device_path, total_size, opts=None,
-
+def __init__(self, device_path, total_size, opts=None, tasks=None):
+"""Initialize WipeJob as a task orchestrator
+
+Args:
+device_path: Path to device (e.g., '/dev/sda1')
+total_size: Total size in bytes
+opts: Options namespace
+tasks: List of WipeTask instances to execute sequentially (if None, legacy mode)
+"""
+self.opts = opts
 self.device_path = device_path
 self.total_size = total_size
 self.do_abort = False
 self.thread = None

+# Task orchestration (new)
+self.tasks = tasks if tasks else [] # List of WipeTask instances
+self.current_task = None # Currently executing task
+self.current_task_index = 0 # Index of current task
+
+# Legacy attributes (kept for backwards compatibility)
 self.start_mono = time.monotonic() # Track the start time
-self.total_written =
-self.resume_from =
-self.resume_mode =
+self.total_written = 0 # Will be set by tasks
+self.resume_from = 0 # Track resume offset
+self.resume_mode = None # Original mode if resuming (overrides opts.wipe_mode)
 self.wr_hists = [] # list of (mono, written)
 self.done = False
 self.exception = None # in case of issues

-# Multi-pass tracking
+# Multi-pass tracking (legacy, now handled by task sequence)
 self.passes = getattr(opts, 'passes', 1) # Total number of passes to perform
 self.current_pass = 0 # Current pass number (0-indexed)

-# Verification tracking
+# Verification tracking (proxied from verify tasks)
 self.verify_phase = False # True when verifying
 self.verify_start_mono = None # Start time of verify phase
 self.verify_progress = 0 # Bytes verified so far
@@ -157,8 +164,8 @@ class WipeJob:
 self.last_marker_update_mono = time.monotonic() - 25 # Last time we wrote progress marker
 self.marker_update_interval = 30 # Update marker every 30 seconds

-## SLOWDOWN / STALL DETECTION/ABORT FEATURE
-##
+## SLOWDOWN / STALL DETECTION/ABORT FEATURE (proxied from tasks)
+##
 self.slowdown_stop = getattr(opts, 'slowdown_stop', 16)
 self.stall_timeout = getattr(opts, 'stall_timeout', 60)
 self.max_slowdown_ratio = 0
@@ -168,13 +175,13 @@ class WipeJob:
 self.baseline_end_mono = None # When baseline measurement ended
 # Stall tracking
 self.last_progress_mono = time.monotonic() # Last time we made progress
-self.last_progress_written =
+self.last_progress_written = 0 # Bytes written at last progress check
 # For periodic slowdown checks (every 10 seconds)
 self.last_slowdown_check = 0
 # Initialize write history for speed calculation
-self.wr_hists.append(SimpleNamespace(mono=self.start_mono, written=
-
-# ERROR ABORT FEATURE
+self.wr_hists.append(SimpleNamespace(mono=self.start_mono, written=0))
+
+# ERROR ABORT FEATURE (proxied from write tasks)
 self.max_consecutive_errors = 3 # a control
 self.max_total_errors = 100 # a control
 self.reopen_on_error = True # a control
@@ -183,6 +190,157 @@ class WipeJob:



+def run_tasks(self):
+"""Execute task sequence and update WipeJob state from tasks
+
+This is the main orchestration method that runs each task in sequence.
+It proxies task state to WipeJob attributes for backwards compatibility.
+"""
+# Start abort flag sync thread
+stop_sync = [False]
+
+def sync_abort_flag():
+"""Continuously sync WipeJob.do_abort to current task"""
+while not stop_sync[0]:
+if self.current_task and not self.current_task.do_abort:
+self.current_task.do_abort = self.do_abort
+time.sleep(0.1) # Check every 100ms
+
+sync_thread = threading.Thread(target=sync_abort_flag, daemon=True)
+sync_thread.start()
+
+# Track if all write tasks have completed
+all_writes_complete = False
+last_write_task_index = -1
+
+# Find the last WriteTask index
+for i, task in enumerate(self.tasks):
+if isinstance(task, WriteTask):
+last_write_task_index = i
+
+try:
+for i, task in enumerate(self.tasks):
+# Skip already-completed tasks (from resume logic)
+if task.done:
+continue
+
+if self.do_abort:
+break
+
+self.current_task = task
+self.current_task_index = i
+
+# Run the task
+task.run_task()
+
+# Proxy task state back to WipeJob for compatibility FIRST
+# (before checking exceptions, so we capture metrics even on failure)
+# Write tasks
+if isinstance(task, WriteTask):
+self.total_written = task.total_written
+self.max_slowdown_ratio = max(self.max_slowdown_ratio, task.max_slowdown_ratio)
+self.max_stall_secs = max(self.max_stall_secs, task.max_stall_secs)
+self.total_errors = task.total_errors
+self.reopen_count = task.reopen_count
+# Set expected pattern from write task
+if isinstance(task, WriteZeroTask):
+self.expected_pattern = "zeroed"
+elif isinstance(task, WriteRandTask):
+self.expected_pattern = "random"
+
+# Write final marker after last write task completes successfully
+if i == last_write_task_index and not task.exception and not self.do_abort:
+all_writes_complete = True
+self._write_final_marker()
+
+# Verify tasks
+elif isinstance(task, VerifyTask):
+self.verify_phase = True
+if self.verify_start_mono is None:
+self.verify_start_mono = task.start_mono
+self.verify_progress = task.total_written # VerifyTask uses total_written for progress
+self.verify_pct = task.verify_pct
+# Extract verify result from task summary
+summary = task.get_summary_dict()
+self.verify_result = summary.get('result', None)
+
+# Check for task errors (AFTER proxying state)
+if task.exception:
+self.exception = task.exception
+# For write tasks, failure means wipe didn't succeed
+if isinstance(task, WriteTask):
+# Sync abort state before breaking
+if task.do_abort:
+self.do_abort = True
+break
+# For verify tasks, continue but record the exception
+# (wipe succeeded but verification failed)
+
+# Check if task was aborted (sync abort state)
+if task.do_abort and not self.do_abort:
+self.do_abort = True
+break
+
+finally:
+# Stop abort sync thread
+stop_sync[0] = True
+
+# Write marker on stop to capture current progress (not just last 30s marker)
+if self.do_abort and self.total_written > 0 and not all_writes_complete:
+# Stopped mid-wipe - write progress marker
+# Determine pattern from last completed WriteTask
+is_random = False # Default to zeros
+for task in reversed(self.tasks):
+if isinstance(task, WriteTask) and task.total_written > 0:
+is_random = isinstance(task, WriteRandTask)
+break
+
+try:
+abort_reason = self._extract_abort_reason()
+with open(self.device_path, 'r+b') as marker_file:
+marker_file.seek(0)
+marker_file.write(self.prep_marker_buffer(is_random, verify_status=None,
+abort_reason=abort_reason))
+marker_file.flush()
+os.fsync(marker_file.fileno())
+except Exception:
+pass # Don't fail the stop on marker write error
+
+# Always mark as done when tasks complete
+self.done = True
+self.current_task = None
+
+@staticmethod
+def _get_pass_pattern_static(pass_number, total_passes, desired_mode):
+"""Static version of get_pass_pattern for use in start_job()
+
+Determine what pattern to write for a given pass.
+
+Args:
+pass_number: 0-indexed pass number
+total_passes: Total number of passes
+desired_mode: 'Rand' or 'Zero' - the final desired pattern
+
+Returns:
+bool: True for random, False for zeros
+"""
+if total_passes == 1:
+# Single pass: just write desired pattern
+return desired_mode == 'Rand'
+
+# Multi-pass: alternate patterns, ending on desired
+# Final pass is always desired pattern
+if pass_number == total_passes - 1:
+return desired_mode == 'Rand'
+
+# Earlier passes: alternate, starting with opposite
+if desired_mode == 'Rand':
+# Even passes (0, 2, ...) = Zero, odd (1, 3, ...) = Rand
+return pass_number % 2 == 1
+else:
+# Even passes (0, 2, ...) = Rand, odd (1, 3, ...) = Zero
+return pass_number % 2 == 0
+
 @staticmethod
 def start_job(device_path, total_size, opts):
 """Start a wipe job in a background thread
@@ -221,8 +379,8 @@ class WipeJob:
 # Partial/stopped wipe - resume from where it left off
 resume_from = scrubbed
 # Ensure we don't resume in the marker area
-if resume_from <
-resume_from =
+if resume_from < WipeTask.MARKER_SIZE:
+resume_from = WipeTask.MARKER_SIZE
 # Also ensure not past the end (sanity check)
 if resume_from > total_size * getattr(opts, 'passes', 1):
 resume_from = 0 # Start over if marker corrupted
@@ -249,9 +407,63 @@ class WipeJob:
 # Pattern matches - resume from current position
 resume_from = scrubbed

-
-
-
+# Build task sequence
+tasks = []
+mode = getattr(opts, 'wipe_mode', 'Rand')
+base_mode = mode.replace('+V', '') # Remove verification suffix
+auto_verify = '+V' in mode
+passes = getattr(opts, 'passes', 1)
+
+# Check if mode changed - if so, don't resume (start fresh)
+if resume_mode and resume_mode != base_mode:
+# User changed wipe mode (e.g., Zero -> Rand) - start fresh
+resume_from = 0
+resume_mode = None
+
+# Use resume_mode if resuming, otherwise use current mode
+desired_mode = resume_mode if resume_mode else base_mode
+
+# Build write task sequence (alternating patterns for multi-pass)
+for pass_num in range(passes):
+# Determine pattern for this pass
+is_random = WipeJob._get_pass_pattern_static(pass_num, passes, desired_mode)
+
+# Calculate pass offset and resume point
+pass_start = pass_num * total_size
+pass_resume = 0
+if resume_from > pass_start:
+# Resume within this pass
+pass_resume = resume_from - pass_start
+
+# Create appropriate write task
+if is_random:
+task = WriteRandTask(device_path, total_size, opts,
+resume_from=pass_resume, pass_number=pass_num)
+else:
+task = WriteZeroTask(device_path, total_size, opts,
+resume_from=pass_resume, pass_number=pass_num)
+tasks.append(task)
+
+# If we haven't reached the resume point yet, skip this task
+if resume_from >= (pass_num + 1) * total_size:
+# This pass is already complete, mark task as done
+task.done = True
+task.total_written = total_size
+
+# Add auto-verification task if requested
+if auto_verify:
+verify_pct = getattr(opts, 'verify_pct', 2)
+if desired_mode == 'Rand':
+task = VerifyRandTask(device_path, total_size, opts, verify_pct=verify_pct)
+else:
+task = VerifyZeroTask(device_path, total_size, opts, verify_pct=verify_pct)
+tasks.append(task)
+
+# Create WipeJob with task sequence
+job = WipeJob(device_path=device_path, total_size=total_size, opts=opts, tasks=tasks)
+job.resume_from = resume_from
+job.resume_mode = resume_mode
+job.thread = threading.Thread(target=job.run_tasks)
 job.thread.start()
 return job

@@ -265,44 +477,43 @@ class WipeJob:
 opts: Options namespace with verify_pct
 expected_pattern: "zeroed", "random", or None (auto-detect)
 """
-
-
-
+# Read existing marker to determine expected pattern if not specified
+device_name = os.path.basename(device_path)
+existing_marker = WipeJob.read_marker_buffer(device_name)
+if existing_marker and expected_pattern is None:
+expected_pattern = "random" if existing_marker.mode == 'Rand' else "zeroed"
+
 verify_pct = getattr(opts, 'verify_pct', 0)
 if verify_pct == 0:
 verify_pct = 2 # Default to 2% if not set

-#
-
-
-
-
-
+# Create verify task
+tasks = []
+if expected_pattern == "random":
+task = VerifyRandTask(device_path, total_size, opts, verify_pct=verify_pct)
+else:
+task = VerifyZeroTask(device_path, total_size, opts, verify_pct=verify_pct)
+tasks.append(task)
+
+# Create WipeJob with verify task
+job = WipeJob(device_path=device_path, total_size=total_size, opts=opts, tasks=tasks)
+job.is_verify_only = True
+job.expected_pattern = expected_pattern
+job.verify_phase = True

 def verify_runner():
 try:
-#
-
-existing_marker = WipeJob.read_marker_buffer(device_name)
-if existing_marker:
-# Infer expected pattern from marker if not already set
-if job.expected_pattern is None:
-job.expected_pattern = "random" if existing_marker.mode == 'Rand' else "zeroed"
-
-job.verify_partition(verify_pct)
+# Run the verify task
+job.run_tasks()

 # Write marker with verification status
 if existing_marker:
 is_random = existing_marker.mode == 'Rand'
 job._write_marker_with_verify_status(is_random)
-# Note: _write_marker_with_verify_status sets job.done in its finally block
-else:
-# No marker - just mark as done
-job.done = True
 except Exception:
 job.exception = traceback.format_exc()
 finally:
-# ALWAYS ensure job is marked as done
+# ALWAYS ensure job is marked as done
 if not job.done:
 job.done = True

@@ -314,50 +525,50 @@ class WipeJob:
 """Check for stall (no progress) - called frequently"""
 if self.stall_timeout <= 0:
 return False
-
+
 time_since_progress = current_monotonic - self.last_progress_mono
 self.max_stall_secs = max(time_since_progress, self.max_stall_secs)
 if time_since_progress >= self.stall_timeout:
 self.do_abort = True
 self.exception = f"Stall detected: No progress for {time_since_progress:.1f} seconds"
 return True
-
+
 return False

 def _check_for_slowdown(self, current_monotonic):
 """Check for slowdown - called every 10 seconds"""
 if self.slowdown_stop <= 0 or self.baseline_speed is None or self.baseline_speed <= 0:
 return False
-
+
 # Calculate current speed over last 30 seconds
 floor = current_monotonic - 30
 recent_history = [h for h in self.wr_hists if h.mono >= floor]
-
+
 if len(recent_history) >= 2:
 recent_start = recent_history[0]
 recent_written = self.total_written - recent_start.written
 recent_elapsed = current_monotonic - recent_start.mono
-
+
 if recent_elapsed > 1.0:
 current_speed = recent_written / recent_elapsed
 self.baseline_speed = max(self.baseline_speed, current_speed)
 slowdown_ratio = self.baseline_speed / max(current_speed, 1)
 slowdown_ratio = int(round(slowdown_ratio, 0))
 self.max_slowdown_ratio = max(self.max_slowdown_ratio, slowdown_ratio)
-
+
 if slowdown_ratio > self.slowdown_stop:
 self.do_abort = True
 self.exception = (f"Slowdown abort: ({Utils.human(current_speed)}B/s)"
 f" is 1/{slowdown_ratio} baseline")
 return True
-
+
 return False

 def _update_baseline_if_needed(self, current_monotonic):
 """Update baseline speed measurement if still in first 60 seconds"""
 if self.baseline_speed is not None:
 return # Baseline already established
-
+
 if (current_monotonic - self.start_mono) >= 60:
 total_written_60s = self.total_written - self.resume_from
 elapsed_60s = current_monotonic - self.start_mono
@@ -384,6 +595,15 @@ class WipeJob:
 - Flushing phase: 100% FLUSH while kernel syncs to device
 - Verify phase (v0-v100%): elapsed/rate/eta for verification only
 """
+# NEW: Proxy to current task if using task-based architecture
+if self.current_task is not None:
+# Continuously proxy task metrics to job for display (especially for WriteTask)
+if isinstance(self.current_task, WriteTask):
+self.max_slowdown_ratio = max(self.max_slowdown_ratio, self.current_task.max_slowdown_ratio)
+self.max_stall_secs = max(self.max_stall_secs, self.current_task.max_stall_secs)
+return self.current_task.get_status()
+
+# LEGACY: Original implementation for backwards compatibility
 pct_str, rate_str, when_str = '', '', ''
 mono = time.monotonic()

@@ -465,7 +685,208 @@ class WipeJob:

 return Utils.ago_str(int(round(elapsed_time))), pct_str, rate_str, when_str

-def
+def get_plan_dict(self, mode=None):
+"""Generate plan dictionary for structured logging
+
+Args:
+mode: Optional mode override (e.g., 'Rand', 'Zero', 'Rand+V')
+If None, uses self.opts.wipe_mode
+
+Returns:
+dict: Plan section with operation, steps, mode, verify settings, passes
+"""
+if mode is None:
+mode = getattr(self.opts, 'wipe_mode', 'Unknown')
+
+# Build steps list
+steps = []
+
+# Extract base mode (remove +V suffix)
+base_mode = mode.replace('+V', '')
+verify_in_mode = '+V' in mode
+
+# Add wipe steps (one per pass)
+for pass_num in range(self.passes):
+if self.passes > 1:
+steps.append(f"wipe {base_mode} {self.device_path} (pass {pass_num + 1}/{self.passes})")
+else:
+steps.append(f"wipe {base_mode} {self.device_path}")
+
+# Add verify step if enabled
+verify_pct = getattr(self.opts, 'verify_pct', 0)
+if verify_in_mode or verify_pct > 0:
+if verify_pct > 0 and verify_pct < 100:
+steps.append(f"verify {base_mode} ({verify_pct}% sample)")
+else:
+steps.append(f"verify {base_mode}")
+
+return {
+"operation": "verify" if self.is_verify_only else "wipe",
+"steps": steps,
+"mode": base_mode,
+"verify_enabled": verify_in_mode or verify_pct > 0,
+"verify_pct": verify_pct,
+"passes": self.passes,
+"slowdown_stop_threshold": self.slowdown_stop,
+"stall_timeout_threshold": self.stall_timeout,
+}
+
+def get_summary_dict(self):
+"""Generate complete summary dictionary for structured logging
+
+Returns:
+dict: Summary with top-level aggregates and per-step details
+"""
+# NEW: Aggregate task summaries if using task-based architecture
+if self.tasks:
+mono = time.monotonic()
+total_elapsed = mono - self.start_mono
+
+# Aggregate all task summaries
+steps = []
+total_errors = 0
+for task in self.tasks:
+task_summary = task.get_summary_dict()
+steps.append(task_summary)
+total_errors += task_summary.get('errors', 0)
+
+# Calculate actual percentage complete from total work done
+total_work = self.total_size * self.passes
+pct_complete = min(100, (self.total_written / total_work) * 100 if total_work > 0 else 0)
+
+# Extract abort reason and error message if stopped with exception
+abort_reason = None
+error_message = None
+if self.do_abort and self.exception:
+abort_reason = self._extract_abort_reason()
+error_message = self.exception
+
+# Build top-level summary
+summary = {
+"result": "stopped" if self.do_abort else "completed",
+"total_elapsed": Utils.ago_str(int(total_elapsed)),
+"total_errors": total_errors,
+"pct_complete": round(pct_complete, 1),
+"resumed_from_bytes": self.resume_from,
+"steps": steps,
+}
+
+# Add error information if present
+if abort_reason:
+summary["abort_reason"] = abort_reason
+if error_message:
+summary["error_message"] = error_message
+
+return summary
+
+# LEGACY: Original implementation for backwards compatibility
+mono = time.monotonic()
+write_elapsed = mono - self.start_mono
+
+# Calculate write rates
+write_rate_bps = self.total_written / write_elapsed if write_elapsed > 0 else 0
+
+# Calculate completion percentage
+total_work = self.total_size * self.passes
+pct_complete = min(100, (self.total_written / total_work) * 100 if total_work > 0 else 0)
+
+# Build wipe step
+mode = getattr(self.opts, 'wipe_mode', 'Unknown').replace('+V', '')
+wipe_step = {
+"step": f"wipe {mode} {self.device_path}",
+"elapsed": Utils.ago_str(int(write_elapsed)),
+"rate": f"{Utils.human(int(write_rate_bps))}/s",
+"bytes_written": self.total_written,
+"bytes_total": total_work,
+"passes_total": self.passes,
+"passes_completed": min(self.total_written // self.total_size, self.passes),
+"current_pass": self.current_pass,
+"peak_write_rate": f"{Utils.human(int(self.baseline_speed))}/s" if self.baseline_speed else None,
+"worst_stall": Utils.ago_str(int(self.max_stall_secs)),
+"worst_slowdown_ratio": round(self.max_slowdown_ratio, 1),
+"errors": self.total_errors,
+"reopen_count": self.reopen_count,
+}
+
+# Build steps array
+steps = [wipe_step]
+
+# Add verification step if verify was done
+total_elapsed = write_elapsed
+if self.verify_start_mono:
+verify_elapsed = mono - self.verify_start_mono
+total_elapsed = write_elapsed + verify_elapsed
+verify_rate_bps = self.verify_progress / verify_elapsed if verify_elapsed > 0 else 0
+
+# Extract verify detail from verify_result if it contains extra info
+verify_detail = None
+if self.verify_result and '(' in str(self.verify_result):
+# Extract detail from results like "not-wiped (non-zero at 22K)"
+verify_detail = str(self.verify_result).split('(')[1].rstrip(')')
+
+verify_pct = getattr(self.opts, 'verify_pct', 0)
+verify_label = f"verify {mode}"
+if verify_pct > 0 and verify_pct < 100:
+verify_label += f" ({verify_pct}% sample)"
+
+verify_step = {
+"step": verify_label,
+"elapsed": Utils.ago_str(int(verify_elapsed)),
+"rate": f"{Utils.human(int(verify_rate_bps))}/s",
+"bytes_checked": self.verify_progress,
+"result": self.verify_result,
+}
+if verify_detail:
+verify_step["verify_detail"] = verify_detail
+
+steps.append(verify_step)
+
+# Extract abort reason and error message if stopped with exception
+abort_reason = None
+error_message = None
+if self.do_abort and self.exception:
+abort_reason = self._extract_abort_reason()
+error_message = self.exception
+
+# Build top-level summary
+summary = {
+"result": "stopped" if self.do_abort else "completed",
+"total_elapsed": Utils.ago_str(int(total_elapsed)),
+"total_errors": self.total_errors,
+"pct_complete": round(pct_complete, 1),
+"resumed_from_bytes": self.resume_from,
+"steps": steps,
+}
+
+# Add error information if present
+if abort_reason:
+summary["abort_reason"] = abort_reason
+if error_message:
+summary["error_message"] = error_message
+
+return summary
+
+def _extract_abort_reason(self):
+"""Extract short error reason from exception for marker
+
+Returns short lowercase error type like 'slowdown', 'stall', or None
+if no user-facing abort reason exists (only used for abnormal stops).
+"""
+if not self.exception:
+return None
+
+exc_lower = self.exception.lower()
+
+# Check for known abort conditions
+if 'slowdown abort' in exc_lower:
+return 'slowdown'
+elif 'stall detected' in exc_lower:
+return 'stall'
+
+# Don't record internal errors in marker (marker write failures, tracebacks, etc.)
+return None
+
+def prep_marker_buffer(self, is_random, verify_status=None, abort_reason=None):
 """Get the 1st 16KB to write:
 - 15K zeros
 - JSON status + zero fill to 1KB
@@ -477,10 +898,12 @@ class WipeJob:
 - passes: Number of passes intended/completed
 - mode: 'Rand' or 'Zero' (final desired pattern)
 - verify_status: 'pass', 'fail', or omitted (not verified)
+- abort_reason: short error description if job failed abnormally

 Args:
 is_random: bool, whether random data was written
 verify_status: str, "pass", "fail", or None (not verified)
+abort_reason: str, short error description or None (no error)
 """
 data = {"unixtime": int(time.time()),
 "scrubbed_bytes": self.total_written,
@@ -490,12 +913,14 @@ class WipeJob:
 }
 if verify_status is not None:
 data["verify_status"] = verify_status
+if abort_reason is not None:
+data["abort_reason"] = abort_reason
 json_data = json.dumps(data).encode('utf-8')
-buffer = bytearray(
-buffer[:
-buffer[
-remaining_size =
-buffer[
+buffer = bytearray(WipeTask.MARKER_SIZE) # Only 16KB, not 1MB
+buffer[:WipeTask.STATE_OFFSET] = b'\x00' * WipeTask.STATE_OFFSET
+buffer[WipeTask.STATE_OFFSET:WipeTask.STATE_OFFSET + len(json_data)] = json_data
+remaining_size = WipeTask.MARKER_SIZE - (WipeTask.STATE_OFFSET + len(json_data))
+buffer[WipeTask.STATE_OFFSET + len(json_data):] = b'\x00' * remaining_size
 return buffer

 def get_pass_pattern(self, pass_number, desired_mode):
@@ -555,6 +980,8 @@ class WipeJob:
 with open(self.device_path, 'r+b') as marker_file:
 marker_file.seek(0)
 marker_file.write(self.prep_marker_buffer(is_random))
+marker_file.flush()
+os.fsync(marker_file.fileno())
 self.last_marker_update_mono = now_mono
 except Exception:
 # If marker update fails, just continue - we'll try again in 30s
@@ -577,7 +1004,7 @@ class WipeJob:
 try:
 with open(device_path, 'rb') as device:
 # Skip past the marker area (first 16KB)
-device.seek(
+device.seek(WipeTask.WRITE_SIZE)
 data = device.read(sample_size)

 if not data:
@@ -623,15 +1050,15 @@ class WipeJob:
 try:
 with open(f'/dev/{device_name}', 'rb') as device:
 device.seek(0)
-buffer = device.read(
+buffer = device.read(WipeTask.MARKER_SIZE)
 except Exception:
 return None # cannot find info

-if buffer[:
+if buffer[:WipeTask.STATE_OFFSET] != b'\x00' * (WipeTask.STATE_OFFSET):
 return None # First 15 KB are not zeros

 # Extract JSON data from the next 1 KB Strip trailing zeros
-json_data_bytes = buffer[
+json_data_bytes = buffer[WipeTask.STATE_OFFSET:WipeTask.MARKER_SIZE].rstrip(b'\x00')

 if not json_data_bytes:
 return None # No JSON data found
@@ -646,13 +1073,13 @@ class WipeJob:
 for key, value in data.items():
 if key in ('unixtime', 'scrubbed_bytes', 'size_bytes', 'passes') and isinstance(value, int):
 rv[key] = value
-elif key in ('mode', 'verify_status') and isinstance(value, str):
+elif key in ('mode', 'verify_status', 'abort_reason') and isinstance(value, str):
 rv[key] = value
 else:
 return None # bogus data
-# Old markers: 4 fields (no passes, no verify_status)
-# New markers: 5 fields minimum (with passes), 6 with verify_status
-if len(rv) < 4 or len(rv) >
+# Old markers: 4 fields (no passes, no verify_status, no abort_reason)
+# New markers: 5 fields minimum (with passes), 6 with verify_status, 7 with abort_reason
+if len(rv) < 4 or len(rv) > 7:
 return None # bogus data
 return SimpleNamespace(**rv)

@@ -684,10 +1111,7 @@ class WipeJob:

 # Open device with O_DIRECT for unbuffered I/O (bypasses page cache)
 # O_DIRECT gives maximum performance with zero dirty pages
-
-fd = os.open(self.device_path, os.O_WRONLY | os.O_DIRECT)
-else:
-fd = None
+fd = os.open(self.device_path, os.O_WRONLY | os.O_DIRECT)

 try:
 # Continue writing until we reach target_bytes
@@ -695,18 +1119,17 @@ class WipeJob:
 # Calculate current pass and offset within pass
 self.current_pass = self.total_written // self.total_size
 offset_in_pass = self.total_written % self.total_size
-
+
 # SKIP MARKER AREA - don't overwrite it!
-if offset_in_pass <
-self.total_written +=
-offset_in_pass =
+if offset_in_pass < WipeTask.MARKER_SIZE:
+self.total_written += WipeTask.MARKER_SIZE - offset_in_pass
+offset_in_pass = WipeTask.MARKER_SIZE

 # Determine pattern for this pass (alternating for multi-pass)
 is_random_pass = self.get_pass_pattern(self.current_pass, desired_mode)

 # Seek to current position (O_DIRECT requires block-aligned seeks)
-
-os.lseek(fd, offset_in_pass, os.SEEK_SET)
+os.lseek(fd, offset_in_pass, os.SEEK_SET)

 # Write until end of current pass or target_bytes, whichever comes first
 pass_remaining = self.total_size - offset_in_pass
@@ -720,11 +1143,11 @@ class WipeJob:

 # Update baseline if needed (first 60 seconds)
 self._update_baseline_if_needed(current_mono)
-
+
 # Check for stall (frequently)
 if self._check_for_stall(current_mono):
 break
-
+
 # Check for slowdown (every 10 seconds)
 if self.baseline_speed is not None:
 time_since_last_check = current_mono - self.last_slowdown_check
@@ -732,47 +1155,43 @@ class WipeJob:
 if self._check_for_slowdown(current_mono):
 break
 self.last_slowdown_check = current_mono
-
+
 # Update progress tracking
 if self.total_written > self.last_progress_written:
 self.last_progress_mono = current_mono

-
+
 # Calculate chunk size (must be block-aligned for O_DIRECT)
 remaining = bytes_to_write_this_pass - pass_bytes_written
-chunk_size = min(
+chunk_size = min(WipeTask.WRITE_SIZE, remaining)
 # Round down to block boundary
-chunk_size = (chunk_size //
+chunk_size = (chunk_size // WipeTask.BLOCK_SIZE) * WipeTask.BLOCK_SIZE
 if chunk_size == 0:
 break

 # Select buffer based on pass type
 if is_random_pass:
 # Use slice of random buffer (still aligned via memoryview)
-chunk =
+chunk = WipeTask.buffer[:chunk_size]
 else:
 # Use zero buffer
-chunk =
+chunk = WipeTask.zero_buffer[:chunk_size]

-
-
-
-
-
-
-
-
-# Save exception for debugging
-self.exception = str(e)
-self.do_abort = True
-bytes_written = 0
+try:
+# Write with O_DIRECT (bypasses page cache)
+bytes_written, fd = self.safe_write(fd, chunk)
+except Exception as e:
+# Save exception for debugging
+self.exception = str(e)
+self.do_abort = True
+bytes_written = 0

 self.total_written += bytes_written
 pass_bytes_written += bytes_written

 # Periodically update marker for crash recovery (every 30s)
 # Note: marker writes use separate buffered file handle
-if
+if self.total_written > WipeTask.MARKER_SIZE:
 marker_is_random = (desired_mode == 'Rand')
 self.maybe_update_marker(marker_is_random)

@@ -789,14 +1208,17 @@ class WipeJob:
 # Write final marker buffer at beginning after ALL passes complete
 # Skip marker write on abort to avoid blocking on problematic devices
 # Use separate buffered file handle (marker is not O_DIRECT aligned)
-if
+if self.total_written > 0 and not self.do_abort:
 try:
 final_is_random = (desired_mode == 'Rand')
 with open(self.device_path, 'r+b') as marker_file:
 marker_file.seek(0)
 marker_file.write(self.prep_marker_buffer(final_is_random))
+marker_file.flush()
+os.fsync(marker_file.fileno())
 except Exception:
-
+# Log marker write failure but don't fail the whole job
+self.exception = f"Marker write failed: {traceback.format_exc()}"

 # Auto-start verification if enabled and write completed successfully
 verify_pct = getattr(self.opts, 'verify_pct', 0)
@@ -818,13 +1240,13 @@ class WipeJob:

 def safe_write(self, fd, chunk):
 """Safe write with error recovery.
-
+
 Returns:
 tuple: (bytes_written, fd) - bytes_written is either:
 - Actual bytes written (success)
 - len(chunk) (failed but non-fatal - skip entire chunk)
 fd might be new if reopened
-
+
 Raises:
 Exception: If should abort (too many consecutive errors)
 """
@@ -834,7 +1256,7 @@ class WipeJob:
 bytes_written = os.write(fd, chunk)
 self.reopen_count = 0
 return bytes_written, fd # success
-
+
 except Exception as e:
 consecutive_errors += 1
 self.total_errors += 1
@@ -890,27 +1312,24 @@ class WipeJob:

 # Fast-fail for zeros
 fast_fail_zeros = (self.expected_pattern == "zeroed")
-
+
 # For unmarked disks: track if ALL bytes are zero
 all_zeros = (self.expected_pattern is None)
-
+
 # Track section results for debugging
 self.section_results = [] # Store (section_idx, result, stats)

 try:
 # Open with regular buffered I/O
-
-fd = os.open(self.device_path, os.O_RDONLY)
-else:
-fd = None
+fd = os.open(self.device_path, os.O_RDONLY)

 read_chunk_size = 64 * 1024 # 64KB chunks
 SAMPLE_STEP = 23 # Sample every 23rd byte (~4% of data) - prime for even distribution

 # Skip marker area
-marker_skip =
+marker_skip = WipeTask.BUFFER_SIZE
 usable_size = self.total_size - marker_skip
-
+
 # Divide disk into 100 sections for sampling
 num_sections = 100
 section_size = usable_size // num_sections
@@ -949,8 +1368,7 @@ class WipeJob:
 verified_in_section = 0

 # Seek to position in this section
-
-os.lseek(fd, read_pos, os.SEEK_SET)
+os.lseek(fd, read_pos, os.SEEK_SET)

 # Read and analyze THIS SECTION
 while verified_in_section < bytes_to_verify:
@@ -959,18 +1377,14 @@ class WipeJob:

 chunk_size = min(read_chunk_size, bytes_to_verify - verified_in_section)

-
-
-
-else:
-data = os.read(fd, chunk_size)
-if not data:
-break
+data = os.read(fd, chunk_size)
+if not data:
+break

 # --------------------------------------------------
 # SECTION ANALYSIS
 # --------------------------------------------------
-
+
 # FAST zero check for zeroed pattern
 if fast_fail_zeros:
 # Ultra-fast: compare against pre-allocated zero pattern
@@ -990,12 +1404,12 @@ class WipeJob:
 # Use memoryview for fast slicing
 mv = memoryview(data)
 data_len = len(data)
-
+
 # Sample every SAMPLE_STEP-th byte
 for i in range(0, data_len, SAMPLE_STEP):
 section_byte_counts[mv[i]] += 1
 section_samples += 1
-
+
 # --------------------------------------------------
 # END SECTION ANALYSIS
 # --------------------------------------------------
@@ -1012,7 +1426,7 @@ class WipeJob:
 # Already passed zero check if we got here
 section_result = "zeroed"
 section_stats = {}
-
+
 elif all_zeros:
 if not section_found_nonzero:
 section_result = "zeroed"
@@ -1022,15 +1436,15 @@ class WipeJob:
 section_result, section_stats = self._analyze_section_randomness(
 section_byte_counts, section_samples
 )
-
+
 else: # Expected random
 section_result, section_stats = self._analyze_section_randomness(
 section_byte_counts, section_samples
 )
-
+
 # Store section result
 self.section_results.append((section_idx, section_result, section_stats))
-
+
 # Check if section failed
 if (self.expected_pattern == "random" and section_result != "random") or \
 (self.expected_pattern == "zeroed" and section_result != "zeroed") or \
@@ -1055,7 +1469,7 @@ class WipeJob:
 zeroed_sections = sum(1 for _, result, _ in self.section_results if result == "zeroed")
 random_sections = sum(1 for _, result, _ in self.section_results if result == "random")
 total_checked = len([r for _, r, _ in self.section_results if r != "skipped"])
-
+
 if zeroed_sections == total_checked:
 self.verify_result = "zeroed"
 self.expected_pattern = "zeroed"
@@ -1074,7 +1488,7 @@ class WipeJob:
 # Determine from section consensus
 zeroed_sections = sum(1 for _, result, _ in self.section_results if result == "zeroed")
 random_sections = sum(1 for _, result, _ in self.section_results if result == "random")
-
+
 if zeroed_sections > random_sections:
 self.verify_result = "zeroed"
 self.expected_pattern = "zeroed"
@@ -1095,7 +1509,7 @@ class WipeJob:
 """Analyze if a section appears random"""
 if total_samples < 100:
 return "insufficient-data", {"samples": total_samples}
-
+
 # Calculate statistics
 max_count = max(byte_counts)
 max_freq = max_count / total_samples
@@ -1105,7 +1519,7 @@ class WipeJob:

 # Count completely unused bytes
 unused_bytes = sum(1 for count in byte_counts if count == 0)
-
+
 # Calculate expected frequency and variance
 expected = total_samples / 256
 if expected > 0:
@@ -1115,19 +1529,19 @@ class WipeJob:
 cv = std_dev / expected
 else:
 cv = float('inf')
-
+
 # Decision logic for "random"
 # Good random data should:
 # 1. Use most byte values (>200 unique)
 # 2. No single byte dominates (<2% frequency)
 # 3. Relatively even distribution (CV < 2.0)
 # 4. Not too many zeros (if it's supposed to be random, not zeroed)
-
+
 is_random = (unique_bytes > 200 and # >78% of bytes used
 max_freq < 0.02 and # No byte > 2%
 cv < 2.0 and # Not too lumpy
 byte_counts[0] / total_samples < 0.5) # Not mostly zeros
-
+
 stats = {
 "samples": total_samples,
 "max_freq": max_freq,
@@ -1136,7 +1550,7 @@ class WipeJob:
 "cv": cv,
 "zero_freq": byte_counts[0] / total_samples if total_samples > 0 else 0
 }
-
+
 if is_random:
 return "random", stats
 else:
@@ -1153,6 +1567,37 @@ class WipeJob:



+def _write_final_marker(self):
+"""Write final marker after write tasks complete (shows 100% completion)
+
+This marker indicates the wipe is complete, regardless of verification.
+Does NOT include verify_status - that's just for display/logging.
+"""
+try:
+# Determine final pattern from last WriteTask in task sequence
+is_random = False # Default to zeros
+last_write_task = None
+for task in reversed(self.tasks):
+if isinstance(task, WriteTask):
+last_write_task = task
+break
+
+if last_write_task:
+is_random = isinstance(last_write_task, WriteRandTask)
+
+# Write marker WITHOUT verify_status (write completion only)
+with open(self.device_path, 'r+b') as marker_file:
+marker_file.seek(0)
+marker_file.write(self.prep_marker_buffer(is_random, verify_status=None))
+marker_file.flush()
+os.fsync(marker_file.fileno())
+
+except Exception:
+# Log error but don't fail the job - marker write is not critical
+# The wipe itself succeeded
+if not self.exception: # Don't overwrite existing exception
+self.exception = f"Final marker write failed: {traceback.format_exc()}"
+
 def _write_marker_with_verify_status(self, is_random):
 """Write marker buffer with verification status if verification was performed

@@ -1198,23 +1643,25 @@ class WipeJob:
 self.total_written = self.total_size # Mark as fully wiped
 # Write marker for this previously unmarked disk
 elif existing_marker:
-# Only write if verify status changed
-if existing_verify_status == new_verify_status:
-return
 # Preserve original scrubbed_bytes if this is a verify-only job
 if self.total_written == 0:
 self.total_written = existing_marker.scrubbed_bytes
+# For verify-only jobs, only write if verify status changed
+if existing_verify_status == new_verify_status:
+return
+# Otherwise, always update marker (to reflect wipe completion or status change)
 else:
 # No marker and verify failed - don't write marker
 return

 # Write marker with verification status
-
-
-
-
-
-
+with open(self.device_path, 'r+b') as device:
+device.seek(0)
+marker_buffer = self.prep_marker_buffer(is_random,
+verify_status=new_verify_status)
+device.write(marker_buffer)
+device.flush()
+os.fsync(device.fileno())

 except Exception:
 # Catch ANY exception in this method to ensure self.done is always set
@@ -1225,19 +1672,20 @@ class WipeJob:


 # Initialize the class-level buffers with mmap for O_DIRECT alignment
-
+# Now using WipeTask base class for shared buffers
+if WipeTask.buffer is None:
 # Allocate random buffer with mmap (page-aligned for O_DIRECT)
-
+WipeTask.buffer_mem = mmap.mmap(-1, WipeTask.BUFFER_SIZE,
 flags=mmap.MAP_PRIVATE | mmap.MAP_ANONYMOUS)
-raw_buffer = os.urandom(
+raw_buffer = os.urandom(WipeTask.BUFFER_SIZE)
 rebalanced = WipeJob._rebalance_buffer(raw_buffer)
-
-
-
+WipeTask.buffer_mem.write(rebalanced)
+WipeTask.buffer_mem.seek(0)
+WipeTask.buffer = memoryview(WipeTask.buffer_mem)

 # Allocate zero buffer with mmap
-
+WipeTask.zero_buffer_mem = mmap.mmap(-1, WipeTask.WRITE_SIZE,
 flags=mmap.MAP_PRIVATE | mmap.MAP_ANONYMOUS)
-
-
-
+WipeTask.zero_buffer_mem.write(b'\x00' * WipeTask.WRITE_SIZE)
+WipeTask.zero_buffer_mem.seek(0)
+WipeTask.zero_buffer = memoryview(WipeTask.zero_buffer_mem)