dwipe-2.0.1-py3-none-any.whl → dwipe-3.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dwipe/DeviceChangeMonitor.py +244 -0
- dwipe/DeviceInfo.py +703 -177
- dwipe/DeviceWorker.py +566 -0
- dwipe/DiskWipe.py +953 -214
- dwipe/DrivePreChecker.py +203 -0
- dwipe/FirmwareWipeTask.py +865 -0
- dwipe/NvmeTool.py +225 -0
- dwipe/PersistentState.py +45 -16
- dwipe/Prereqs.py +84 -0
- dwipe/SataTool.py +499 -0
- dwipe/StructuredLogger.py +644 -0
- dwipe/Tunables.py +62 -0
- dwipe/Utils.py +298 -3
- dwipe/VerifyTask.py +412 -0
- dwipe/WipeJob.py +631 -171
- dwipe/WipeTask.py +150 -0
- dwipe/WriteTask.py +402 -0
- dwipe/main.py +34 -9
- dwipe-3.0.0.dist-info/METADATA +566 -0
- dwipe-3.0.0.dist-info/RECORD +24 -0
- dwipe/ToolManager.py +0 -637
- dwipe/WipeJobFuture.py +0 -245
- dwipe-2.0.1.dist-info/METADATA +0 -410
- dwipe-2.0.1.dist-info/RECORD +0 -14
- {dwipe-2.0.1.dist-info → dwipe-3.0.0.dist-info}/WHEEL +0 -0
- {dwipe-2.0.1.dist-info → dwipe-3.0.0.dist-info}/entry_points.txt +0 -0
- {dwipe-2.0.1.dist-info → dwipe-3.0.0.dist-info}/licenses/LICENSE +0 -0
dwipe/WipeJob.py
CHANGED
@@ -1,6 +1,7 @@
 """
 WipeJob class for handling disk/partition wiping operations
 
+Orchestrates task sequences (write and verify operations).
 """
 # pylint: disable=broad-exception-raised,broad-exception-caught
 import os
@@ -14,25 +15,18 @@ import mmap
 from types import SimpleNamespace
 
 from .Utils import Utils
+from .WipeTask import WipeTask
+from .WriteTask import WriteTask, WriteZeroTask, WriteRandTask
+from .VerifyTask import VerifyTask, VerifyZeroTask, VerifyRandTask
+from .FirmwareWipeTask import FirmwareWipeTask
 
 
 class WipeJob:
-    """Handles disk/partition wiping operations with progress tracking
+    """Handles disk/partition wiping operations with progress tracking
 
-
-
-
-    BUFFER_SIZE = WRITE_SIZE  # Same size for O_DIRECT
-
-    # Marker constants (separate from O_DIRECT writes)
-    MARKER_SIZE = 16 * 1024   # 16KB for marker
-    STATE_OFFSET = 15 * 1024  # where json is written (for marker buffer)
-
-    # Aligned buffers allocated with mmap (initialized at module load)
-    buffer = None           # Random data buffer (memoryview)
-    buffer_mem = None       # Underlying mmap object
-    zero_buffer = None      # Zero buffer (memoryview)
-    zero_buffer_mem = None  # Underlying mmap object
+    Note: Constants and buffers are now defined in WipeTask base class.
+    WipeJob uses WipeTask.BLOCK_SIZE, WipeTask.WRITE_SIZE, WipeTask.buffer, etc.
+    """
 
     @staticmethod
     def _get_dirty_kb():
@@ -124,26 +118,40 @@ class WipeJob:
 
         return bytes(result)
 
-    def __init__(self, device_path, total_size, opts=None,
-
+    def __init__(self, device_path, total_size, opts=None, tasks=None):
+        """Initialize WipeJob as a task orchestrator
+
+        Args:
+            device_path: Path to device (e.g., '/dev/sda1')
+            total_size: Total size in bytes
+            opts: Options namespace
+            tasks: List of WipeTask instances to execute sequentially (if None, legacy mode)
+        """
+        self.opts = opts
         self.device_path = device_path
         self.total_size = total_size
         self.do_abort = False
         self.thread = None
 
+        # Task orchestration (new)
+        self.tasks = tasks if tasks else []  # List of WipeTask instances
+        self.current_task = None             # Currently executing task
+        self.current_task_index = 0          # Index of current task
+
+        # Legacy attributes (kept for backwards compatibility)
         self.start_mono = time.monotonic()  # Track the start time
-        self.total_written =
-        self.resume_from =
-        self.resume_mode =
+        self.total_written = 0    # Will be set by tasks
+        self.resume_from = 0      # Track resume offset
+        self.resume_mode = None   # Original mode if resuming (overrides opts.wipe_mode)
         self.wr_hists = []  # list of (mono, written)
         self.done = False
         self.exception = None  # in case of issues
 
-        # Multi-pass tracking
+        # Multi-pass tracking (legacy, now handled by task sequence)
         self.passes = getattr(opts, 'passes', 1)  # Total number of passes to perform
         self.current_pass = 0  # Current pass number (0-indexed)
 
-        # Verification tracking
+        # Verification tracking (proxied from verify tasks)
         self.verify_phase = False       # True when verifying
         self.verify_start_mono = None   # Start time of verify phase
         self.verify_progress = 0        # Bytes verified so far
@@ -157,9 +165,9 @@ class WipeJob:
         self.last_marker_update_mono = time.monotonic() - 25  # Last time we wrote progress marker
         self.marker_update_interval = 30  # Update marker every 30 seconds
 
-        ## SLOWDOWN / STALL DETECTION/ABORT FEATURE
-        ##
-        self.slowdown_stop = getattr(opts, 'slowdown_stop',
+        ## SLOWDOWN / STALL DETECTION/ABORT FEATURE (proxied from tasks)
+        ##
+        self.slowdown_stop = getattr(opts, 'slowdown_stop', 64)
         self.stall_timeout = getattr(opts, 'stall_timeout', 60)
         self.max_slowdown_ratio = 0
         self.max_stall_secs = 0
@@ -168,13 +176,13 @@ class WipeJob:
         self.baseline_end_mono = None  # When baseline measurement ended
         # Stall tracking
         self.last_progress_mono = time.monotonic()  # Last time we made progress
-        self.last_progress_written =
+        self.last_progress_written = 0  # Bytes written at last progress check
         # For periodic slowdown checks (every 10 seconds)
         self.last_slowdown_check = 0
         # Initialize write history for speed calculation
-        self.wr_hists.append(SimpleNamespace(mono=self.start_mono, written=
-
-        # ERROR ABORT FEATURE
+        self.wr_hists.append(SimpleNamespace(mono=self.start_mono, written=0))
+
+        # ERROR ABORT FEATURE (proxied from write tasks)
         self.max_consecutive_errors = 3  # a control
         self.max_total_errors = 100      # a control
         self.reopen_on_error = True      # a control
@@ -183,6 +191,165 @@ class WipeJob:
 
 
 
+    def run_tasks(self):
+        """Execute task sequence and update WipeJob state from tasks
+
+        This is the main orchestration method that runs each task in sequence.
+        It proxies task state to WipeJob attributes for backwards compatibility.
+        """
+        # Start abort flag sync thread
+        stop_sync = [False]
+
+        def sync_abort_flag():
+            """Continuously sync WipeJob.do_abort to current task"""
+            while not stop_sync[0]:
+                if self.current_task and not self.current_task.do_abort:
+                    self.current_task.do_abort = self.do_abort
+                time.sleep(0.1)  # Check every 100ms
+
+        sync_thread = threading.Thread(target=sync_abort_flag, daemon=True)
+        sync_thread.start()
+
+        # Track if all write tasks have completed
+        all_writes_complete = False
+        last_write_task_index = -1
+
+        # Find the last WriteTask index
+        for i, task in enumerate(self.tasks):
+            if isinstance(task, WriteTask):
+                last_write_task_index = i
+
+        try:
+            for i, task in enumerate(self.tasks):
+                # Skip already-completed tasks (from resume logic)
+                if task.done:
+                    continue
+
+                if self.do_abort:
+                    break
+
+                self.current_task = task
+                self.current_task_index = i
+
+                # Set job reference so tasks can access shared state (e.g., for firmware verify)
+                task.job = self
+
+                # Run the task
+                task.run_task()
+
+                # Proxy task state back to WipeJob for compatibility FIRST
+                # (before checking exceptions, so we capture metrics even on failure)
+                # Write tasks
+                if isinstance(task, WriteTask):
+                    self.total_written = task.total_written
+                    self.max_slowdown_ratio = max(self.max_slowdown_ratio, task.max_slowdown_ratio)
+                    self.max_stall_secs = max(self.max_stall_secs, task.max_stall_secs)
+                    self.total_errors = task.total_errors
+                    self.reopen_count = task.reopen_count
+                    # Set expected pattern from write task
+                    if isinstance(task, WriteZeroTask):
+                        self.expected_pattern = "zeroed"
+                    elif isinstance(task, WriteRandTask):
+                        self.expected_pattern = "random"
+
+                    # Write final marker after last write task completes successfully
+                    if i == last_write_task_index and not task.exception and not self.do_abort:
+                        all_writes_complete = True
+                        self._write_final_marker()
+
+                # Verify tasks
+                elif isinstance(task, VerifyTask):
+                    self.verify_phase = True
+                    if self.verify_start_mono is None:
+                        self.verify_start_mono = task.start_mono
+                    self.verify_progress = task.total_written  # VerifyTask uses total_written for progress
+                    self.verify_pct = task.verify_pct
+                    # Extract verify result from task summary
+                    summary = task.get_summary_dict()
+                    self.verify_result = summary.get('result', None)
+
+                    # Write marker with verify status after verification completes
+                    if not task.exception and not self.do_abort and self.verify_result:
+                        is_random = (self.expected_pattern == "random")
+                        self._write_marker_with_verify_status(is_random)
+
+                # Check for task errors (AFTER proxying state)
+                if task.exception:
+                    # For write tasks and firmware wipe tasks, failure is critical - set exception and break
+                    if isinstance(task, (WriteTask, FirmwareWipeTask)):
+                        self.exception = task.exception
+                        # Sync abort state before breaking
+                        if task.do_abort:
+                            self.do_abort = True
+                        break
+                    # For other tasks (precheck, pre-verify, post-verify), continue without setting job exception
+                    # (these are support tasks - only the actual wipe matters for job success/failure)
+
+                # Check if task was aborted (sync abort state)
+                if task.do_abort and not self.do_abort:
+                    self.do_abort = True
+                    break
+
+        finally:
+            # Stop abort sync thread
+            stop_sync[0] = True
+
+            # Write marker on stop to capture current progress (not just last 30s marker)
+            if self.do_abort and self.total_written > 0 and not all_writes_complete:
+                # Stopped mid-wipe - write progress marker
+                # Determine pattern from last completed WriteTask
+                is_random = False  # Default to zeros
+                for task in reversed(self.tasks):
+                    if isinstance(task, WriteTask) and task.total_written > 0:
+                        is_random = isinstance(task, WriteRandTask)
+                        break
+
+                try:
+                    abort_reason = self._extract_abort_reason()
+                    with open(self.device_path, 'r+b') as marker_file:
+                        marker_file.seek(0)
+                        marker_file.write(self.prep_marker_buffer(is_random, verify_status=None,
+                                                                  abort_reason=abort_reason))
+                        marker_file.flush()
+                        os.fsync(marker_file.fileno())
+                except Exception:
+                    pass  # Don't fail the stop on marker write error
+
+            # Always mark as done when tasks complete
+            self.done = True
+            self.current_task = None
+
+    @staticmethod
+    def _get_pass_pattern_static(pass_number, total_passes, desired_mode):
+        """Static version of get_pass_pattern for use in start_job()
+
+        Determine what pattern to write for a given pass.
+
+        Args:
+            pass_number: 0-indexed pass number
+            total_passes: Total number of passes
+            desired_mode: 'Rand' or 'Zero' - the final desired pattern
+
+        Returns:
+            bool: True for random, False for zeros
+        """
+        if total_passes == 1:
+            # Single pass: just write desired pattern
+            return desired_mode == 'Rand'
+
+        # Multi-pass: alternate patterns, ending on desired
+        # Final pass is always desired pattern
+        if pass_number == total_passes - 1:
+            return desired_mode == 'Rand'
+
+        # Earlier passes: alternate, starting with opposite
+        if desired_mode == 'Rand':
+            # Even passes (0, 2, ...) = Zero, odd (1, 3, ...) = Rand
+            return pass_number % 2 == 1
+        else:
+            # Even passes (0, 2, ...) = Rand, odd (1, 3, ...) = Zero
+            return pass_number % 2 == 0
+
     @staticmethod
     def start_job(device_path, total_size, opts):
         """Start a wipe job in a background thread
@@ -221,8 +388,8 @@ class WipeJob:
             # Partial/stopped wipe - resume from where it left off
             resume_from = scrubbed
             # Ensure we don't resume in the marker area
-            if resume_from <
-                resume_from =
+            if resume_from < WipeTask.MARKER_SIZE:
+                resume_from = WipeTask.MARKER_SIZE
             # Also ensure not past the end (sanity check)
             if resume_from > total_size * getattr(opts, 'passes', 1):
                 resume_from = 0  # Start over if marker corrupted
@@ -249,9 +416,63 @@ class WipeJob:
                 # Pattern matches - resume from current position
                 resume_from = scrubbed
 
-
-
-
+        # Build task sequence
+        tasks = []
+        mode = getattr(opts, 'wipe_mode', 'Rand')
+        base_mode = mode.replace('+V', '')  # Remove verification suffix
+        auto_verify = '+V' in mode
+        passes = getattr(opts, 'passes', 1)
+
+        # Check if mode changed - if so, don't resume (start fresh)
+        if resume_mode and resume_mode != base_mode:
+            # User changed wipe mode (e.g., Zero -> Rand) - start fresh
+            resume_from = 0
+            resume_mode = None
+
+        # Use resume_mode if resuming, otherwise use current mode
+        desired_mode = resume_mode if resume_mode else base_mode
+
+        # Build write task sequence (alternating patterns for multi-pass)
+        for pass_num in range(passes):
+            # Determine pattern for this pass
+            is_random = WipeJob._get_pass_pattern_static(pass_num, passes, desired_mode)
+
+            # Calculate pass offset and resume point
+            pass_start = pass_num * total_size
+            pass_resume = 0
+            if resume_from > pass_start:
+                # Resume within this pass
+                pass_resume = resume_from - pass_start
+
+            # Create appropriate write task
+            if is_random:
+                task = WriteRandTask(device_path, total_size, opts,
+                                     resume_from=pass_resume, pass_number=pass_num)
+            else:
+                task = WriteZeroTask(device_path, total_size, opts,
+                                     resume_from=pass_resume, pass_number=pass_num)
+            tasks.append(task)
+
+            # If we haven't reached the resume point yet, skip this task
+            if resume_from >= (pass_num + 1) * total_size:
+                # This pass is already complete, mark task as done
+                task.done = True
+                task.total_written = total_size
+
+        # Add auto-verification task if requested
+        if auto_verify:
+            verify_pct = getattr(opts, 'verify_pct', 2)
+            if desired_mode == 'Rand':
+                task = VerifyRandTask(device_path, total_size, opts, verify_pct=verify_pct)
+            else:
+                task = VerifyZeroTask(device_path, total_size, opts, verify_pct=verify_pct)
+            tasks.append(task)
+
+        # Create WipeJob with task sequence
+        job = WipeJob(device_path=device_path, total_size=total_size, opts=opts, tasks=tasks)
+        job.resume_from = resume_from
+        job.resume_mode = resume_mode
+        job.thread = threading.Thread(target=job.run_tasks)
         job.thread.start()
         return job
 
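Note: the resume handling above splits one absolute byte offset across passes, marking earlier passes done and handing the remainder to the pass in progress. A small self-contained sketch of that arithmetic (illustrative helper name, not a packaged function):

    def split_resume(resume_from, total_size, passes):
        # For each pass, decide whether it is already complete and, if not,
        # where writing should resume inside that pass (0 = start of the pass).
        plan = []
        for pass_num in range(passes):
            pass_start = pass_num * total_size
            done = resume_from >= (pass_num + 1) * total_size
            pass_resume = total_size if done else max(0, resume_from - pass_start)
            plan.append((pass_num, done, pass_resume))
        return plan

    # Example: 3 passes over a 100-byte device, resuming at absolute offset 150:
    # pass 0 is done, pass 1 resumes at 50, pass 2 starts from 0.
    print(split_resume(150, 100, 3))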
@@ -265,44 +486,43 @@ class WipeJob:
             opts: Options namespace with verify_pct
             expected_pattern: "zeroed", "random", or None (auto-detect)
         """
-
-
-
+        # Read existing marker to determine expected pattern if not specified
+        device_name = os.path.basename(device_path)
+        existing_marker = WipeJob.read_marker_buffer(device_name)
+        if existing_marker and expected_pattern is None:
+            expected_pattern = "random" if existing_marker.mode == 'Rand' else "zeroed"
+
         verify_pct = getattr(opts, 'verify_pct', 0)
         if verify_pct == 0:
             verify_pct = 2  # Default to 2% if not set
 
-        #
-
-
-
-
-
+        # Create verify task
+        tasks = []
+        if expected_pattern == "random":
+            task = VerifyRandTask(device_path, total_size, opts, verify_pct=verify_pct)
+        else:
+            task = VerifyZeroTask(device_path, total_size, opts, verify_pct=verify_pct)
+        tasks.append(task)
+
+        # Create WipeJob with verify task
+        job = WipeJob(device_path=device_path, total_size=total_size, opts=opts, tasks=tasks)
+        job.is_verify_only = True
+        job.expected_pattern = expected_pattern
+        job.verify_phase = True
 
         def verify_runner():
             try:
-                #
-
-                existing_marker = WipeJob.read_marker_buffer(device_name)
-                if existing_marker:
-                    # Infer expected pattern from marker if not already set
-                    if job.expected_pattern is None:
-                        job.expected_pattern = "random" if existing_marker.mode == 'Rand' else "zeroed"
-
-                job.verify_partition(verify_pct)
+                # Run the verify task
+                job.run_tasks()
 
                 # Write marker with verification status
                 if existing_marker:
                     is_random = existing_marker.mode == 'Rand'
                     job._write_marker_with_verify_status(is_random)
-                    # Note: _write_marker_with_verify_status sets job.done in its finally block
-                else:
-                    # No marker - just mark as done
-                    job.done = True
             except Exception:
                 job.exception = traceback.format_exc()
             finally:
-                # ALWAYS ensure job is marked as done
+                # ALWAYS ensure job is marked as done
                 if not job.done:
                     job.done = True
 
@@ -314,50 +534,50 @@ class WipeJob:
         """Check for stall (no progress) - called frequently"""
         if self.stall_timeout <= 0:
             return False
-
+
         time_since_progress = current_monotonic - self.last_progress_mono
         self.max_stall_secs = max(time_since_progress, self.max_stall_secs)
         if time_since_progress >= self.stall_timeout:
             self.do_abort = True
             self.exception = f"Stall detected: No progress for {time_since_progress:.1f} seconds"
             return True
-
+
         return False
 
     def _check_for_slowdown(self, current_monotonic):
         """Check for slowdown - called every 10 seconds"""
         if self.slowdown_stop <= 0 or self.baseline_speed is None or self.baseline_speed <= 0:
             return False
-
+
         # Calculate current speed over last 30 seconds
         floor = current_monotonic - 30
         recent_history = [h for h in self.wr_hists if h.mono >= floor]
-
+
         if len(recent_history) >= 2:
             recent_start = recent_history[0]
             recent_written = self.total_written - recent_start.written
             recent_elapsed = current_monotonic - recent_start.mono
-
+
             if recent_elapsed > 1.0:
                 current_speed = recent_written / recent_elapsed
                 self.baseline_speed = max(self.baseline_speed, current_speed)
                 slowdown_ratio = self.baseline_speed / max(current_speed, 1)
                 slowdown_ratio = int(round(slowdown_ratio, 0))
                 self.max_slowdown_ratio = max(self.max_slowdown_ratio, slowdown_ratio)
-
+
                 if slowdown_ratio > self.slowdown_stop:
                     self.do_abort = True
                     self.exception = (f"Slowdown abort: ({Utils.human(current_speed)}B/s)"
                                       f" is 1/{slowdown_ratio} baseline")
                     return True
-
+
         return False
 
     def _update_baseline_if_needed(self, current_monotonic):
         """Update baseline speed measurement if still in first 60 seconds"""
         if self.baseline_speed is not None:
             return  # Baseline already established
-
+
         if (current_monotonic - self.start_mono) >= 60:
             total_written_60s = self.total_written - self.resume_from
             elapsed_60s = current_monotonic - self.start_mono
@@ -377,13 +597,23 @@ class WipeJob:
                                  f"Completed: {percent_complete:.2f}%")
 
     def get_status(self):
-        """Get status tuple: (elapsed, percent, rate, eta)
+        """Get status tuple: (elapsed, percent, rate, eta, more_state)
 
         Returns stats for current phase only:
         - Write phase (0-100%): elapsed/rate/eta for writing
         - Flushing phase: 100% FLUSH while kernel syncs to device
         - Verify phase (v0-v100%): elapsed/rate/eta for verification only
+        - more_state: optional extra status from derived task class
         """
+        # NEW: Proxy to current task if using task-based architecture
+        if self.current_task is not None:
+            # Continuously proxy task metrics to job for display (especially for WriteTask)
+            if isinstance(self.current_task, WriteTask):
+                self.max_slowdown_ratio = max(self.max_slowdown_ratio, self.current_task.max_slowdown_ratio)
+                self.max_stall_secs = max(self.max_stall_secs, self.current_task.max_stall_secs)
+            return self.current_task.get_status()
+
+        # LEGACY: Original implementation for backwards compatibility
         pct_str, rate_str, when_str = '', '', ''
         mono = time.monotonic()
 
@@ -426,7 +656,7 @@ class WipeJob:
             else:
                 when_str = '0'
 
-            return Utils.ago_str(int(round(elapsed_time))), pct_str, rate_str, when_str
+            return Utils.ago_str(int(round(elapsed_time))), pct_str, rate_str, when_str, ""
         else:
             # Write phase: 0-100% (across all passes)
             written = self.total_written
@@ -463,9 +693,210 @@ class WipeJob:
             else:
                 when_str = '0'
 
-            return Utils.ago_str(int(round(elapsed_time))), pct_str, rate_str, when_str
+            return Utils.ago_str(int(round(elapsed_time))), pct_str, rate_str, when_str, ""
+
+    def get_plan_dict(self, mode=None):
+        """Generate plan dictionary for structured logging
+
+        Args:
+            mode: Optional mode override (e.g., 'Rand', 'Zero', 'Rand+V')
+                  If None, uses self.opts.wipe_mode
+
+        Returns:
+            dict: Plan section with operation, steps, mode, verify settings, passes
+        """
+        if mode is None:
+            mode = getattr(self.opts, 'wipe_mode', 'Unknown')
+
+        # Build steps list
+        steps = []
+
+        # Extract base mode (remove +V suffix)
+        base_mode = mode.replace('+V', '')
+        verify_in_mode = '+V' in mode
+
+        # Add wipe steps (one per pass)
+        for pass_num in range(self.passes):
+            if self.passes > 1:
+                steps.append(f"wipe {base_mode} {self.device_path} (pass {pass_num + 1}/{self.passes})")
+            else:
+                steps.append(f"wipe {base_mode} {self.device_path}")
+
+        # Add verify step if enabled
+        verify_pct = getattr(self.opts, 'verify_pct', 0)
+        if verify_in_mode or verify_pct > 0:
+            if verify_pct > 0 and verify_pct < 100:
+                steps.append(f"verify {base_mode} ({verify_pct}% sample)")
+            else:
+                steps.append(f"verify {base_mode}")
+
+        return {
+            "operation": "verify" if self.is_verify_only else "wipe",
+            "steps": steps,
+            "mode": base_mode,
+            "verify_enabled": verify_in_mode or verify_pct > 0,
+            "verify_pct": verify_pct,
+            "passes": self.passes,
+            "slowdown_stop_threshold": self.slowdown_stop,
+            "stall_timeout_threshold": self.stall_timeout,
+        }
+
+    def get_summary_dict(self):
+        """Generate complete summary dictionary for structured logging
+
+        Returns:
+            dict: Summary with top-level aggregates and per-step details
+        """
+        # NEW: Aggregate task summaries if using task-based architecture
+        if self.tasks:
+            mono = time.monotonic()
+            total_elapsed = mono - self.start_mono
+
+            # Aggregate all task summaries
+            steps = []
+            total_errors = 0
+            for task in self.tasks:
+                task_summary = task.get_summary_dict()
+                steps.append(task_summary)
+                total_errors += task_summary.get('errors', 0)
+
+            # Calculate actual percentage complete from total work done
+            total_work = self.total_size * self.passes
+            pct_complete = min(100, (self.total_written / total_work) * 100 if total_work > 0 else 0)
+
+            # Extract abort reason and error message if stopped with exception
+            abort_reason = None
+            error_message = None
+            if self.do_abort and self.exception:
+                abort_reason = self._extract_abort_reason()
+                error_message = self.exception
+
+            # Build top-level summary
+            summary = {
+                "result": "stopped" if self.do_abort else ("failed" if self.exception else "completed"),
+                "total_elapsed": Utils.ago_str(int(total_elapsed)),
+                "total_errors": total_errors,
+                "pct_complete": round(pct_complete, 1),
+                "resumed_from_bytes": self.resume_from,
+                "steps": steps,
+            }
+
+            # Add error information if present
+            if abort_reason:
+                summary["abort_reason"] = abort_reason
+            if error_message:
+                summary["error_message"] = error_message
+
+            return summary
+
+        # LEGACY: Original implementation for backwards compatibility
+        mono = time.monotonic()
+        write_elapsed = mono - self.start_mono
+
+        # Calculate write rates
+        write_rate_bps = self.total_written / write_elapsed if write_elapsed > 0 else 0
+
+        # Calculate completion percentage
+        total_work = self.total_size * self.passes
+        pct_complete = min(100, (self.total_written / total_work) * 100 if total_work > 0 else 0)
+
+        # Build wipe step
+        mode = getattr(self.opts, 'wipe_mode', 'Unknown').replace('+V', '')
+        wipe_step = {
+            "step": f"wipe {mode} {self.device_path}",
+            "elapsed": Utils.ago_str(int(write_elapsed)),
+            "rate": f"{Utils.human(int(write_rate_bps))}/s",
+            "bytes_written": self.total_written,
+            "bytes_total": total_work,
+            "passes_total": self.passes,
+            "passes_completed": min(self.total_written // self.total_size, self.passes),
+            "current_pass": self.current_pass,
+            "peak_write_rate": f"{Utils.human(int(self.baseline_speed))}/s" if self.baseline_speed else None,
+            "worst_stall": Utils.ago_str(int(self.max_stall_secs)),
+            "worst_slowdown_ratio": round(self.max_slowdown_ratio, 1),
+            "errors": self.total_errors,
+            "reopen_count": self.reopen_count,
+        }
 
-
+        # Build steps array
+        steps = [wipe_step]
+
+        # Add verification step if verify was done
+        total_elapsed = write_elapsed
+        if self.verify_start_mono:
+            verify_elapsed = mono - self.verify_start_mono
+            total_elapsed = write_elapsed + verify_elapsed
+            verify_rate_bps = self.verify_progress / verify_elapsed if verify_elapsed > 0 else 0
+
+            # Extract verify detail from verify_result if it contains extra info
+            verify_detail = None
+            if self.verify_result and '(' in str(self.verify_result):
+                # Extract detail from results like "not-wiped (non-zero at 22K)"
+                verify_detail = str(self.verify_result).split('(')[1].rstrip(')')
+
+            verify_pct = getattr(self.opts, 'verify_pct', 0)
+            verify_label = f"verify {mode}"
+            if verify_pct > 0 and verify_pct < 100:
+                verify_label += f" ({verify_pct}% sample)"
+
+            verify_step = {
+                "step": verify_label,
+                "elapsed": Utils.ago_str(int(verify_elapsed)),
+                "rate": f"{Utils.human(int(verify_rate_bps))}/s",
+                "bytes_checked": self.verify_progress,
+                "result": self.verify_result,
+            }
+            if verify_detail:
+                verify_step["verify_detail"] = verify_detail
+
+            steps.append(verify_step)
+
+        # Extract abort reason and error message if stopped with exception
+        abort_reason = None
+        error_message = None
+        if self.do_abort and self.exception:
+            abort_reason = self._extract_abort_reason()
+            error_message = self.exception
+
+        # Build top-level summary
+        summary = {
+            "result": "stopped" if self.do_abort else ("failed" if self.exception else "completed"),
+            "total_elapsed": Utils.ago_str(int(total_elapsed)),
+            "total_errors": self.total_errors,
+            "pct_complete": round(pct_complete, 1),
+            "resumed_from_bytes": self.resume_from,
+            "steps": steps,
+        }
+
+        # Add error information if present
+        if abort_reason:
+            summary["abort_reason"] = abort_reason
+        if error_message:
+            summary["error_message"] = error_message
+
+        return summary
+
+    def _extract_abort_reason(self):
+        """Extract short error reason from exception for marker
+
+        Returns short lowercase error type like 'slowdown', 'stall', or None
+        if no user-facing abort reason exists (only used for abnormal stops).
+        """
+        if not self.exception:
+            return None
+
+        exc_lower = self.exception.lower()
+
+        # Check for known abort conditions
+        if 'slowdown abort' in exc_lower:
+            return 'slowdown'
+        elif 'stall detected' in exc_lower:
+            return 'stall'
+
+        # Don't record internal errors in marker (marker write failures, tracebacks, etc.)
+        return None
+
+    def prep_marker_buffer(self, is_random, verify_status=None, abort_reason=None):
         """Get the 1st 16KB to write:
         - 15K zeros
         - JSON status + zero fill to 1KB
@@ -477,10 +908,12 @@ class WipeJob:
         - passes: Number of passes intended/completed
         - mode: 'Rand' or 'Zero' (final desired pattern)
         - verify_status: 'pass', 'fail', or omitted (not verified)
+        - abort_reason: short error description if job failed abnormally
 
         Args:
             is_random: bool, whether random data was written
             verify_status: str, "pass", "fail", or None (not verified)
+            abort_reason: str, short error description or None (no error)
         """
         data = {"unixtime": int(time.time()),
                 "scrubbed_bytes": self.total_written,
@@ -490,12 +923,14 @@ class WipeJob:
                 }
         if verify_status is not None:
             data["verify_status"] = verify_status
+        if abort_reason is not None:
+            data["abort_reason"] = abort_reason
         json_data = json.dumps(data).encode('utf-8')
-        buffer = bytearray(
-        buffer[:
-        buffer[
-        remaining_size =
-        buffer[
+        buffer = bytearray(WipeTask.MARKER_SIZE)  # Only 16KB, not 1MB
+        buffer[:WipeTask.STATE_OFFSET] = b'\x00' * WipeTask.STATE_OFFSET
+        buffer[WipeTask.STATE_OFFSET:WipeTask.STATE_OFFSET + len(json_data)] = json_data
+        remaining_size = WipeTask.MARKER_SIZE - (WipeTask.STATE_OFFSET + len(json_data))
+        buffer[WipeTask.STATE_OFFSET + len(json_data):] = b'\x00' * remaining_size
         return buffer
 
     def get_pass_pattern(self, pass_number, desired_mode):
@@ -555,6 +990,8 @@ class WipeJob:
                 with open(self.device_path, 'r+b') as marker_file:
                     marker_file.seek(0)
                     marker_file.write(self.prep_marker_buffer(is_random))
+                    marker_file.flush()
+                    os.fsync(marker_file.fileno())
                 self.last_marker_update_mono = now_mono
             except Exception:
                 # If marker update fails, just continue - we'll try again in 30s
@@ -577,7 +1014,7 @@ class WipeJob:
         try:
             with open(device_path, 'rb') as device:
                 # Skip past the marker area (first 16KB)
-                device.seek(
+                device.seek(WipeTask.WRITE_SIZE)
                 data = device.read(sample_size)
 
                 if not data:
@@ -623,15 +1060,15 @@ class WipeJob:
         try:
             with open(f'/dev/{device_name}', 'rb') as device:
                 device.seek(0)
-                buffer = device.read(
+                buffer = device.read(WipeTask.MARKER_SIZE)
         except Exception:
             return None  # cannot find info
 
-        if buffer[:
+        if buffer[:WipeTask.STATE_OFFSET] != b'\x00' * (WipeTask.STATE_OFFSET):
            return None  # First 15 KB are not zeros
 
        # Extract JSON data from the next 1 KB; strip trailing zeros
-        json_data_bytes = buffer[
+        json_data_bytes = buffer[WipeTask.STATE_OFFSET:WipeTask.MARKER_SIZE].rstrip(b'\x00')
 
        if not json_data_bytes:
            return None  # No JSON data found
@@ -646,13 +1083,13 @@ class WipeJob:
         for key, value in data.items():
             if key in ('unixtime', 'scrubbed_bytes', 'size_bytes', 'passes') and isinstance(value, int):
                 rv[key] = value
-            elif key in ('mode', 'verify_status') and isinstance(value, str):
+            elif key in ('mode', 'verify_status', 'abort_reason') and isinstance(value, str):
                 rv[key] = value
             else:
                 return None  # bogus data
-        # Old markers: 4 fields (no passes, no verify_status)
-        # New markers: 5 fields minimum (with passes), 6 with verify_status
-        if len(rv) < 4 or len(rv) >
+        # Old markers: 4 fields (no passes, no verify_status, no abort_reason)
+        # New markers: 5 fields minimum (with passes), 6 with verify_status, 7 with abort_reason
+        if len(rv) < 4 or len(rv) > 7:
             return None  # bogus data
         return SimpleNamespace(**rv)
 
@@ -684,10 +1121,7 @@ class WipeJob:
 
         # Open device with O_DIRECT for unbuffered I/O (bypasses page cache)
         # O_DIRECT gives maximum performance with zero dirty pages
-
-            fd = os.open(self.device_path, os.O_WRONLY | os.O_DIRECT)
-        else:
-            fd = None
+        fd = os.open(self.device_path, os.O_WRONLY | os.O_DIRECT)
 
         try:
             # Continue writing until we reach target_bytes
@@ -695,18 +1129,17 @@ class WipeJob:
                 # Calculate current pass and offset within pass
                 self.current_pass = self.total_written // self.total_size
                 offset_in_pass = self.total_written % self.total_size
-
+
                 # SKIP MARKER AREA - don't overwrite it!
-                if offset_in_pass <
-                    self.total_written +=
-                    offset_in_pass =
+                if offset_in_pass < WipeTask.MARKER_SIZE:
+                    self.total_written += WipeTask.MARKER_SIZE - offset_in_pass
+                    offset_in_pass = WipeTask.MARKER_SIZE
 
                 # Determine pattern for this pass (alternating for multi-pass)
                 is_random_pass = self.get_pass_pattern(self.current_pass, desired_mode)
 
                 # Seek to current position (O_DIRECT requires block-aligned seeks)
-
-                    os.lseek(fd, offset_in_pass, os.SEEK_SET)
+                os.lseek(fd, offset_in_pass, os.SEEK_SET)
 
                 # Write until end of current pass or target_bytes, whichever comes first
                 pass_remaining = self.total_size - offset_in_pass
@@ -720,11 +1153,11 @@ class WipeJob:
 
                     # Update baseline if needed (first 60 seconds)
                     self._update_baseline_if_needed(current_mono)
-
+
                     # Check for stall (frequently)
                     if self._check_for_stall(current_mono):
                         break
-
+
                     # Check for slowdown (every 10 seconds)
                     if self.baseline_speed is not None:
                         time_since_last_check = current_mono - self.last_slowdown_check
@@ -732,47 +1165,43 @@ class WipeJob:
                             if self._check_for_slowdown(current_mono):
                                 break
                             self.last_slowdown_check = current_mono
-
+
                     # Update progress tracking
                     if self.total_written > self.last_progress_written:
                         self.last_progress_mono = current_mono
 
-
+
                     # Calculate chunk size (must be block-aligned for O_DIRECT)
                     remaining = bytes_to_write_this_pass - pass_bytes_written
-                    chunk_size = min(
+                    chunk_size = min(WipeTask.WRITE_SIZE, remaining)
                     # Round down to block boundary
-                    chunk_size = (chunk_size //
+                    chunk_size = (chunk_size // WipeTask.BLOCK_SIZE) * WipeTask.BLOCK_SIZE
                     if chunk_size == 0:
                         break
 
                     # Select buffer based on pass type
                     if is_random_pass:
                         # Use slice of random buffer (still aligned via memoryview)
-                        chunk =
+                        chunk = WipeTask.buffer[:chunk_size]
                     else:
                         # Use zero buffer
-                        chunk =
+                        chunk = WipeTask.zero_buffer[:chunk_size]
 
-
-
-
-
-
-
-
-
-                        # Save exception for debugging
-                        self.exception = str(e)
-                        self.do_abort = True
-                        bytes_written = 0
+                    try:
+                        # Write with O_DIRECT (bypasses page cache)
+                        bytes_written, fd = self.safe_write(fd, chunk)
+                    except Exception as e:
+                        # Save exception for debugging
+                        self.exception = str(e)
+                        self.do_abort = True
+                        bytes_written = 0
 
                     self.total_written += bytes_written
                     pass_bytes_written += bytes_written
 
                     # Periodically update marker for crash recovery (every 30s)
                     # Note: marker writes use separate buffered file handle
-                    if
+                    if self.total_written > WipeTask.MARKER_SIZE:
                         marker_is_random = (desired_mode == 'Rand')
                         self.maybe_update_marker(marker_is_random)
 
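Note: the chunk-size math above exists because O_DIRECT writes must be block-aligned in size, offset, and buffer address; the module allocates its buffers with anonymous mmap (page-aligned) and rounds every chunk down to a block boundary. A stripped-down sketch of those two pieces, with assumed sizes (the real values live in WipeTask):

    import mmap

    BLOCK_SIZE = 4096          # assumed block size for the example
    WRITE_SIZE = 1024 * 1024   # assumed write granularity for the example

    # Page-aligned zero buffer, suitable for O_DIRECT writes (Linux)
    zero_mem = mmap.mmap(-1, WRITE_SIZE, flags=mmap.MAP_PRIVATE | mmap.MAP_ANONYMOUS)
    zero_buf = memoryview(zero_mem)

    def aligned_chunk(remaining):
        # Round the next chunk down to a block boundary; 0 means nothing left
        # that can still be written through O_DIRECT.
        chunk_size = min(WRITE_SIZE, remaining)
        return (chunk_size // BLOCK_SIZE) * BLOCK_SIZE

    print(aligned_chunk(3 * 1024 * 1024 + 123))   # -> 1048576 (one full write)
    print(aligned_chunk(5000))                    # -> 4096
    print(aligned_chunk(1000))                    # -> 0 (tail handled elsewhere)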
@@ -789,20 +1218,25 @@ class WipeJob:
         # Write final marker buffer at beginning after ALL passes complete
         # Skip marker write on abort to avoid blocking on problematic devices
         # Use separate buffered file handle (marker is not O_DIRECT aligned)
-        if
+        if self.total_written > 0 and not self.do_abort:
             try:
                 final_is_random = (desired_mode == 'Rand')
                 with open(self.device_path, 'r+b') as marker_file:
                     marker_file.seek(0)
                     marker_file.write(self.prep_marker_buffer(final_is_random))
+                    marker_file.flush()
+                    os.fsync(marker_file.fileno())
             except Exception:
-
+                # Log marker write failure but don't fail the whole job
+                self.exception = f"Marker write failed: {traceback.format_exc()}"
 
         # Auto-start verification if enabled and write completed successfully
         verify_pct = getattr(self.opts, 'verify_pct', 0)
         auto_verify = getattr(self.opts, 'wipe_mode', "").endswith('+V')
-        if auto_verify and
-
+        if auto_verify and not self.do_abort and not self.exception:
+            # Default to 2% verification if +V mode enabled but verify_pct not set
+            actual_verify_pct = verify_pct if verify_pct > 0 else 2
+            self.verify_partition(actual_verify_pct)
             # Write marker with verification status after verification completes
             # Use desired_mode to determine if random or zero
             is_random = (desired_mode == 'Rand')
@@ -818,13 +1252,13 @@ class WipeJob:
 
     def safe_write(self, fd, chunk):
         """Safe write with error recovery.
-
+
         Returns:
             tuple: (bytes_written, fd) - bytes_written is either:
             - Actual bytes written (success)
             - len(chunk) (failed but non-fatal - skip entire chunk)
             fd might be new if reopened
-
+
         Raises:
             Exception: If should abort (too many consecutive errors)
         """
@@ -834,7 +1268,7 @@ class WipeJob:
                 bytes_written = os.write(fd, chunk)
                 self.reopen_count = 0
                 return bytes_written, fd  # success
-
+
             except Exception as e:
                 consecutive_errors += 1
                 self.total_errors += 1
@@ -890,27 +1324,24 @@ class WipeJob:
 
         # Fast-fail for zeros
         fast_fail_zeros = (self.expected_pattern == "zeroed")
-
+
         # For unmarked disks: track if ALL bytes are zero
         all_zeros = (self.expected_pattern is None)
-
+
         # Track section results for debugging
         self.section_results = []  # Store (section_idx, result, stats)
 
         try:
             # Open with regular buffered I/O
-
-                fd = os.open(self.device_path, os.O_RDONLY)
-            else:
-                fd = None
+            fd = os.open(self.device_path, os.O_RDONLY)
 
             read_chunk_size = 64 * 1024  # 64KB chunks
             SAMPLE_STEP = 23  # Sample every 23rd byte (~4% of data) - prime for even distribution
 
             # Skip marker area
-            marker_skip =
+            marker_skip = WipeTask.BUFFER_SIZE
             usable_size = self.total_size - marker_skip
-
+
             # Divide disk into 100 sections for sampling
             num_sections = 100
             section_size = usable_size // num_sections
@@ -949,8 +1380,7 @@ class WipeJob:
                 verified_in_section = 0
 
                 # Seek to position in this section
-
-                    os.lseek(fd, read_pos, os.SEEK_SET)
+                os.lseek(fd, read_pos, os.SEEK_SET)
 
                 # Read and analyze THIS SECTION
                 while verified_in_section < bytes_to_verify:
@@ -959,18 +1389,14 @@ class WipeJob:
 
                     chunk_size = min(read_chunk_size, bytes_to_verify - verified_in_section)
 
-
-
-
-                    else:
-                        data = os.read(fd, chunk_size)
-                        if not data:
-                            break
+                    data = os.read(fd, chunk_size)
+                    if not data:
+                        break
 
                     # --------------------------------------------------
                     # SECTION ANALYSIS
                     # --------------------------------------------------
-
+
                     # FAST zero check for zeroed pattern
                     if fast_fail_zeros:
                         # Ultra-fast: compare against pre-allocated zero pattern
@@ -990,12 +1416,12 @@ class WipeJob:
                         # Use memoryview for fast slicing
                         mv = memoryview(data)
                         data_len = len(data)
-
+
                         # Sample every SAMPLE_STEP-th byte
                         for i in range(0, data_len, SAMPLE_STEP):
                             section_byte_counts[mv[i]] += 1
                             section_samples += 1
-
+
                     # --------------------------------------------------
                     # END SECTION ANALYSIS
                     # --------------------------------------------------
@@ -1012,7 +1438,7 @@ class WipeJob:
                     # Already passed zero check if we got here
                     section_result = "zeroed"
                     section_stats = {}
-
+
                 elif all_zeros:
                     if not section_found_nonzero:
                         section_result = "zeroed"
@@ -1022,15 +1448,15 @@ class WipeJob:
                         section_result, section_stats = self._analyze_section_randomness(
                             section_byte_counts, section_samples
                         )
-
+
                 else:  # Expected random
                     section_result, section_stats = self._analyze_section_randomness(
                         section_byte_counts, section_samples
                     )
-
+
                 # Store section result
                 self.section_results.append((section_idx, section_result, section_stats))
-
+
                 # Check if section failed
                 if (self.expected_pattern == "random" and section_result != "random") or \
                    (self.expected_pattern == "zeroed" and section_result != "zeroed") or \
@@ -1055,7 +1481,7 @@ class WipeJob:
                 zeroed_sections = sum(1 for _, result, _ in self.section_results if result == "zeroed")
                 random_sections = sum(1 for _, result, _ in self.section_results if result == "random")
                 total_checked = len([r for _, r, _ in self.section_results if r != "skipped"])
-
+
                 if zeroed_sections == total_checked:
                     self.verify_result = "zeroed"
                     self.expected_pattern = "zeroed"
@@ -1074,7 +1500,7 @@ class WipeJob:
                 # Determine from section consensus
                 zeroed_sections = sum(1 for _, result, _ in self.section_results if result == "zeroed")
                 random_sections = sum(1 for _, result, _ in self.section_results if result == "random")
-
+
                 if zeroed_sections > random_sections:
                     self.verify_result = "zeroed"
                     self.expected_pattern = "zeroed"
@@ -1095,7 +1521,7 @@ class WipeJob:
         """Analyze if a section appears random"""
         if total_samples < 100:
             return "insufficient-data", {"samples": total_samples}
-
+
         # Calculate statistics
         max_count = max(byte_counts)
         max_freq = max_count / total_samples
@@ -1105,7 +1531,7 @@ class WipeJob:
 
         # Count completely unused bytes
        unused_bytes = sum(1 for count in byte_counts if count == 0)
-
+
        # Calculate expected frequency and variance
        expected = total_samples / 256
        if expected > 0:
@@ -1115,19 +1541,19 @@ class WipeJob:
             cv = std_dev / expected
         else:
             cv = float('inf')
-
+
         # Decision logic for "random"
         # Good random data should:
         # 1. Use most byte values (>200 unique)
         # 2. No single byte dominates (<2% frequency)
         # 3. Relatively even distribution (CV < 2.0)
         # 4. Not too many zeros (if it's supposed to be random, not zeroed)
-
+
         is_random = (unique_bytes > 200 and                    # >78% of bytes used
                      max_freq < 0.02 and                       # No byte > 2%
                      cv < 2.0 and                              # Not too lumpy
                      byte_counts[0] / total_samples < 0.5)     # Not mostly zeros
-
+
         stats = {
             "samples": total_samples,
             "max_freq": max_freq,
@@ -1136,7 +1562,7 @@ class WipeJob:
             "cv": cv,
             "zero_freq": byte_counts[0] / total_samples if total_samples > 0 else 0
         }
-
+
         if is_random:
             return "random", stats
         else:
@@ -1153,6 +1579,37 @@ class WipeJob:
 
 
 
+    def _write_final_marker(self):
+        """Write final marker after write tasks complete (shows 100% completion)
+
+        This marker indicates the wipe is complete, regardless of verification.
+        Does NOT include verify_status - that's just for display/logging.
+        """
+        try:
+            # Determine final pattern from last WriteTask in task sequence
+            is_random = False  # Default to zeros
+            last_write_task = None
+            for task in reversed(self.tasks):
+                if isinstance(task, WriteTask):
+                    last_write_task = task
+                    break
+
+            if last_write_task:
+                is_random = isinstance(last_write_task, WriteRandTask)
+
+            # Write marker WITHOUT verify_status (write completion only)
+            with open(self.device_path, 'r+b') as marker_file:
+                marker_file.seek(0)
+                marker_file.write(self.prep_marker_buffer(is_random, verify_status=None))
+                marker_file.flush()
+                os.fsync(marker_file.fileno())
+
+        except Exception:
+            # Log error but don't fail the job - marker write is not critical
+            # The wipe itself succeeded
+            if not self.exception:  # Don't overwrite existing exception
+                self.exception = f"Final marker write failed: {traceback.format_exc()}"
+
     def _write_marker_with_verify_status(self, is_random):
         """Write marker buffer with verification status if verification was performed
 
@@ -1198,23 +1655,25 @@ class WipeJob:
                 self.total_written = self.total_size  # Mark as fully wiped
                 # Write marker for this previously unmarked disk
             elif existing_marker:
-                # Only write if verify status changed
-                if existing_verify_status == new_verify_status:
-                    return
                 # Preserve original scrubbed_bytes if this is a verify-only job
                 if self.total_written == 0:
                     self.total_written = existing_marker.scrubbed_bytes
+                    # For verify-only jobs, only write if verify status changed
+                    if existing_verify_status == new_verify_status:
+                        return
+                # Otherwise, always update marker (to reflect wipe completion or status change)
             else:
                 # No marker and verify failed - don't write marker
                 return
 
             # Write marker with verification status
-
-
-
-
-
-
+            with open(self.device_path, 'r+b') as device:
+                device.seek(0)
+                marker_buffer = self.prep_marker_buffer(is_random,
+                                                        verify_status=new_verify_status)
+                device.write(marker_buffer)
+                device.flush()
+                os.fsync(device.fileno())
 
         except Exception:
             # Catch ANY exception in this method to ensure self.done is always set
@@ -1225,19 +1684,20 @@ class WipeJob:
 
 
 # Initialize the class-level buffers with mmap for O_DIRECT alignment
-
+# Now using WipeTask base class for shared buffers
+if WipeTask.buffer is None:
     # Allocate random buffer with mmap (page-aligned for O_DIRECT)
-
+    WipeTask.buffer_mem = mmap.mmap(-1, WipeTask.BUFFER_SIZE,
                                     flags=mmap.MAP_PRIVATE | mmap.MAP_ANONYMOUS)
-    raw_buffer = os.urandom(
+    raw_buffer = os.urandom(WipeTask.BUFFER_SIZE)
     rebalanced = WipeJob._rebalance_buffer(raw_buffer)
-
-
-
+    WipeTask.buffer_mem.write(rebalanced)
+    WipeTask.buffer_mem.seek(0)
+    WipeTask.buffer = memoryview(WipeTask.buffer_mem)
 
     # Allocate zero buffer with mmap
-
+    WipeTask.zero_buffer_mem = mmap.mmap(-1, WipeTask.WRITE_SIZE,
                                          flags=mmap.MAP_PRIVATE | mmap.MAP_ANONYMOUS)
-
-
-
+    WipeTask.zero_buffer_mem.write(b'\x00' * WipeTask.WRITE_SIZE)
+    WipeTask.zero_buffer_mem.seek(0)
+    WipeTask.zero_buffer = memoryview(WipeTask.zero_buffer_mem)