multivol-0.1.3-py3-none-any.whl → multivol-0.1.4-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in the public registry.
- multivol/api.py +678 -191
- multivol/multi_volatility2.py +27 -173
- multivol/multi_volatility3.py +91 -104
- multivol/multivol.py +118 -42
- multivol/strings.py +71 -0
- {multivol-0.1.3.dist-info → multivol-0.1.4.dist-info}/METADATA +1 -1
- multivol-0.1.4.dist-info/RECORD +12 -0
- multivol-0.1.3.dist-info/RECORD +0 -11
- {multivol-0.1.3.dist-info → multivol-0.1.4.dist-info}/WHEEL +0 -0
- {multivol-0.1.3.dist-info → multivol-0.1.4.dist-info}/entry_points.txt +0 -0
- {multivol-0.1.3.dist-info → multivol-0.1.4.dist-info}/licenses/LICENSE +0 -0
- {multivol-0.1.3.dist-info → multivol-0.1.4.dist-info}/top_level.txt +0 -0
multivol/api.py
CHANGED
@@ -4,6 +4,7 @@ import os
 import docker
 import threading
 import uuid
+import re
 import time
 import sqlite3
 import json
@@ -13,7 +14,16 @@ from werkzeug.utils import secure_filename
 from flask import Flask, request, jsonify, abort, send_from_directory, send_file
 from flask_cors import CORS
 import zipfile
+import zipfile
 import io
+import textwrap
+
+try:
+    from .multi_volatility2 import multi_volatility2
+    from .multi_volatility3 import multi_volatility3
+except ImportError:
+    from multi_volatility2 import multi_volatility2
+    from multi_volatility3 import multi_volatility3
 
 app = Flask(__name__)
 # Increase max upload size to 16GB (or appropriate limit for dumps)
@@ -24,6 +34,12 @@ STORAGE_DIR = os.environ.get("STORAGE_DIR", os.path.join(os.getcwd(), "storage")
 if not os.path.exists(STORAGE_DIR):
     os.makedirs(STORAGE_DIR)
 
+
+DB_PATH = os.path.join(STORAGE_DIR, "scans.db")
+SYMBOLS_DIR = os.path.join(os.getcwd(), "volatility3_symbols")
+if not os.path.exists(SYMBOLS_DIR):
+    os.makedirs(SYMBOLS_DIR)
+
 runner_func = None
 
 @app.route('/health', methods=['GET'])
@@ -38,11 +54,24 @@ def restrict_to_localhost():
 
     # Allow 127.0.0.1 and ::1 (IPv6 localhost)
     allowed_ips = ["127.0.0.1", "::1"]
+
+    # Always allow OPTIONS for CORS preflight
+    if request.method == 'OPTIONS':
+        return
+
     if request.remote_addr not in allowed_ips:
+        print(f"[WARNING] Access blocked from: {request.remote_addr}")
         abort(403, description="Access forbidden: Only localhost connections allowed, please set DISABLE_LOCALHOST_ONLY=1 to disable this check.")
 
+def resolve_host_path(path):
+    """Resolves a container path to a host path for DooD."""
+    host_path = os.environ.get("HOST_PATH")
+    if host_path and path.startswith(os.getcwd()):
+        return os.path.join(host_path, os.path.relpath(path, os.getcwd()))
+    return path
+
 def init_db():
-    conn = sqlite3.connect('scans.db')
+    conn = sqlite3.connect(DB_PATH)
     c = conn.cursor()
     c.execute('''CREATE TABLE IF NOT EXISTS scans
                  (uuid TEXT PRIMARY KEY, status TEXT, mode TEXT, os TEXT, volatility_version TEXT, dump_path TEXT, output_dir TEXT, created_at REAL, error TEXT)''')
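
Note: resolve_host_path() exists because the API can itself run in a container while driving the host's Docker daemon (Docker-outside-of-Docker). Bind-mount paths handed to that daemon are resolved on the host, so container-local paths must be translated first. A minimal runnable sketch of the same logic; the HOST_PATH value below is an assumed example, not taken from the package:

    import os

    # Assumption for illustration: the host mounts /home/user/multivol at the
    # container's working directory and advertises it via HOST_PATH.
    os.environ["HOST_PATH"] = "/home/user/multivol"

    def resolve_host_path(path):
        # Mirrors the helper added above: container path -> host path.
        host_path = os.environ.get("HOST_PATH")
        if host_path and path.startswith(os.getcwd()):
            return os.path.join(host_path, os.path.relpath(path, os.getcwd()))
        return path

    # e.g. <cwd>/outputs/x.json -> /home/user/multivol/outputs/x.json
    print(resolve_host_path(os.path.join(os.getcwd(), "outputs", "x.json")))
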
@@ -66,9 +95,21 @@ def init_db():
         print("[INFO] Migrating DB: Adding 'image' column to scans table")
         c.execute("ALTER TABLE scans ADD COLUMN image TEXT")
 
+    # Migration: Check if 'config_json' column exists (For storing scan options like fetch_symbol)
+    try:
+        c.execute("SELECT config_json FROM scans LIMIT 1")
+    except sqlite3.OperationalError:
+        print("[INFO] Migrating DB: Adding 'config_json' column to scans table")
+        c.execute("ALTER TABLE scans ADD COLUMN config_json TEXT")
+
     # Table for async dump tasks
     c.execute('''CREATE TABLE IF NOT EXISTS dump_tasks
                  (task_id TEXT PRIMARY KEY, scan_id TEXT, status TEXT, output_path TEXT, error TEXT, created_at REAL)''')
+
+    # Table for module status (Debug/Progress UI)
+    c.execute('''CREATE TABLE IF NOT EXISTS scan_module_status
+                 (id INTEGER PRIMARY KEY AUTOINCREMENT, scan_id TEXT, module TEXT, status TEXT, error_message TEXT, updated_at REAL,
+                  FOREIGN KEY(scan_id) REFERENCES scans(uuid))''')
 
     conn.commit()
     conn.close()
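
Note: both migrations use the same probe-then-ALTER idiom: SQLite raises sqlite3.OperationalError when a query references a missing column, and the handler adds it, so the schema upgrades itself on startup without a version table. A self-contained sketch of the idiom (in-memory database, column reused for illustration):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    c = conn.cursor()
    c.execute("CREATE TABLE scans (uuid TEXT PRIMARY KEY)")

    try:
        c.execute("SELECT config_json FROM scans LIMIT 1")  # probe for the column
    except sqlite3.OperationalError:
        # Column is missing: add it in place. ADD COLUMN is cheap in SQLite.
        c.execute("ALTER TABLE scans ADD COLUMN config_json TEXT")

    conn.commit()
    conn.close()
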
@@ -82,7 +123,7 @@ def rename_scan(uuid):
     if not new_name:
         return jsonify({"error": "Name is required"}), 400
 
-    conn = sqlite3.connect('scans.db')
+    conn = sqlite3.connect(DB_PATH)
     c = conn.cursor()
     c.execute("UPDATE scans SET name = ? WHERE uuid = ?", (new_name, uuid))
     conn.commit()
@@ -91,7 +132,7 @@ def rename_scan(uuid):
 
 @app.route('/scans/<uuid>', methods=['DELETE'])
 def delete_scan(uuid):
-    conn = sqlite3.connect('scans.db')
+    conn = sqlite3.connect(DB_PATH)
     conn.row_factory = sqlite3.Row
     c = conn.cursor()
 
@@ -114,7 +155,7 @@ def delete_scan(uuid):
 
 @app.route('/scans/<uuid>/download', methods=['GET'])
 def download_scan_zip(uuid):
-    conn = sqlite3.connect('scans.db')
+    conn = sqlite3.connect(DB_PATH)
     conn.row_factory = sqlite3.Row
     c = conn.cursor()
     c.execute("SELECT output_dir, name FROM scans WHERE uuid = ?", (uuid,))
@@ -203,7 +244,7 @@ def ingest_results_to_db(scan_id, output_dir):
         print(f"[ERROR] Output dir not found: {output_dir}")
         return
 
-    conn = sqlite3.connect('scans.db')
+    conn = sqlite3.connect(DB_PATH)
     c = conn.cursor()
 
     json_files = glob.glob(os.path.join(output_dir, "*_output.json"))
@@ -261,11 +302,63 @@ def upload_file():
     except Exception as e:
         print(f"[ERROR] Failed to save file: {e}")
         return jsonify({"error": str(e)}), 500
+        return jsonify({"error": str(e)}), 500
 
+@app.route('/symbols', methods=['GET'])
+def list_symbols():
+    try:
+        symbols_files = []
+        for root, dirs, files in os.walk(SYMBOLS_DIR):
+            for file in files:
+                # We want relative path from symbols root
+                abs_path = os.path.join(root, file)
+                rel_path = os.path.relpath(abs_path, SYMBOLS_DIR)
+                symbols_files.append({
+                    "name": rel_path,
+                    "size": os.path.getsize(abs_path),
+                    "modified": time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(os.path.getmtime(abs_path)))
+                })
+        return jsonify({"symbols": symbols_files})
+    except Exception as e:
+        return jsonify({"error": str(e)}), 500
+
+@app.route('/symbols', methods=['POST'])
+def upload_symbol():
+    if 'file' not in request.files:
+        return jsonify({"error": "No file part"}), 400
+    file = request.files['file']
+    if file.filename == '':
+        return jsonify({"error": "No chosen file"}), 400
+
+    if file:
+        filename = secure_filename(file.filename)
+        save_path = os.path.join(SYMBOLS_DIR, filename)
+
+        try:
+            print(f"[DEBUG] Saving symbol file to {save_path}")
+            file.save(save_path)
+
+            # If zip, unzip?
+            if filename.endswith(".zip"):
+                # Optional: Unzip if user uploads a full pack
+                # For now, let's just save it. User usually uploads .json or .zip for profiles.
+                # Actually, Vol3 can use zip files directly if placed correctly, or we might want to unzip.
+                # Let's verify what the user likely wants. Usually it's a JSON/ISF.
+                pass
+
+            return jsonify({"status": "success", "path": save_path})
+        except Exception as e:
+            return jsonify({"error": str(e)}), 500
 @app.route('/scan', methods=['POST'])
 def scan():
     data = request.json
 
+    # Determine default image based on mode from request
+    req_mode = data.get('mode', 'vol3') # Default to vol3 if not specified (though validation requires it)
+    default_image = "sp00kyskelet0n/volatility3"
+    if req_mode == "vol2":
+        default_image = "sp00kyskelet0n/volatility2"
+
     # Define default arguments matching CLI defaults and requirements
     default_args = {
         "profiles_path": os.path.join(os.getcwd(), "volatility2_profiles"),
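
Note: the new /symbols pair lets a client inspect and populate SYMBOLS_DIR with ISF files for Volatility 3. A hedged client-side sketch using requests; the base URL and file name are placeholders, since this diff does not show where the Flask app binds:

    import requests

    BASE = "http://127.0.0.1:5000"  # placeholder; adjust to the API's real address

    # Upload an ISF JSON; the multipart field name 'file' matches
    # request.files['file'] in upload_symbol() above.
    with open("my-kernel-symbols.json", "rb") as fh:
        resp = requests.post(f"{BASE}/symbols", files={"file": fh})
        print(resp.json())  # {"status": "success", "path": "..."} on success

    # List what the server now holds under SYMBOLS_DIR.
    print(requests.get(f"{BASE}/symbols").json())
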
@@ -281,15 +374,20 @@ def scan():
         "mode": None,
         "profile": None,
         "processes": None,
-        "host_path": os.environ.get("HOST_PATH") # Added for DooD support via Env
+        "host_path": os.environ.get("HOST_PATH"), # Added for DooD support via Env
+        "debug": True, # Enable command logging for API
+        "fetch_symbol": False,
+        "custom_symbol": None,
+        "image": default_image
     }
 
     args_dict = default_args.copy()
     args_dict.update(data)
 
     # Basic Validation
-
-
+    # Basic Validation
+    if "dump" not in data or "mode" not in data:
+        return jsonify({"error": "Missing required fields: dump, mode"}), 400
 
     # Ensure mutual exclusion for OS flags
     is_linux = bool(data.get("linux"))
@@ -298,9 +396,14 @@ def scan():
     if is_linux == is_windows:
         return jsonify({"error": "You must specify either 'linux': true or 'windows': true, but not both or neither."}), 400
 
+    # Default fetch_symbol to True for Linux if not explicitly provided
+    if is_linux and "fetch_symbol" not in data:
+        args_dict["fetch_symbol"] = True
+
     args_obj = argparse.Namespace(**args_dict)
 
     scan_id = str(uuid.uuid4())
+    args_obj.scan_id = scan_id # Pass scan_id to runner for status updates
     # Construct output directory with UUID
     base_name = f"volatility2_{scan_id}" if args_obj.mode == "vol2" else f"volatility3_{scan_id}"
     # Use absolute path for output_dir to avoid CWD ambiguity and ensure persistence
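
Note: putting the validation above together, POST /scan must carry "dump" and "mode", plus exactly one of "linux"/"windows"; remaining keys override default_args, and fetch_symbol now defaults to true for Linux scans. An illustrative request body (the base URL, path, and case name are placeholders):

    import requests

    BASE = "http://127.0.0.1:5000"  # placeholder bind address

    payload = {
        "dump": "/app/storage/memdump.raw",  # placeholder; must exist server-side
        "mode": "vol3",                      # "vol2" selects the Volatility 2 image
        "linux": True,                       # exactly one of linux/windows
        "light": True,                       # light command set instead of full
        "name": "case-001",                  # optional case name
        # "fetch_symbol" omitted: defaults to True for Linux as of this change
    }
    print(requests.post(f"{BASE}/scan", json=payload).json())
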
@@ -327,18 +430,51 @@ def scan():
     if not os.path.exists(args_obj.dump):
         return jsonify({"error": f"Dump file not found at {args_obj.dump}"}), 400
 
-    args_obj.image = data.get("image") # Ensure image is passed
     case_name = data.get("name") # Optional custom case name
 
-
+    # Determine command list for pre-population
+    try:
+        command_list = []
+        if args_obj.mode == "vol2":
+            vol_instance = multi_volatility2()
+            if args_obj.commands:
+                command_list = args_obj.commands.split(",")
+            elif args_obj.windows:
+                command_list = vol_instance.getCommands("windows.light" if args_obj.light else "windows.full")
+            elif args_obj.linux:
+                command_list = vol_instance.getCommands("linux.light" if args_obj.light else "linux.full")
+        else: # vol3
+            vol_instance = multi_volatility3()
+            if args_obj.commands:
+                command_list = args_obj.commands.split(",")
+            elif args_obj.windows:
+                command_list = vol_instance.getCommands("windows.light" if args_obj.light else "windows.full")
+            elif args_obj.linux:
+                command_list = vol_instance.getCommands("linux.light" if args_obj.light else "linux.full")
+
+        # Inject explicit commands into args for CLI
+        if command_list:
+            args_obj.commands = ",".join(command_list)
+
+    except Exception as e:
+        print(f"[ERROR] Failed to determine commands: {e}")
+        command_list = []
+
+    conn = sqlite3.connect(DB_PATH)
     c = conn.cursor()
-    c.execute("INSERT INTO scans (uuid, status, mode, os, volatility_version, dump_path, output_dir, created_at, image, name) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
-              (scan_id, "pending", "light" if args_obj.light else "full", target_os, vol_version, args_obj.dump, final_output_dir, time.time(), args_obj.image, case_name))
+    c.execute("INSERT INTO scans (uuid, status, mode, os, volatility_version, dump_path, output_dir, created_at, image, name, config_json) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
+              (scan_id, "pending", "light" if args_obj.light else "full", target_os, vol_version, args_obj.dump, final_output_dir, time.time(), args_obj.image, case_name, json.dumps(data)))
+
+    # Pre-populate module status
+    if command_list:
+        for cmd in command_list:
+            c.execute("INSERT INTO scan_module_status (scan_id, module, status, updated_at) VALUES (?, ?, 'PENDING', ?)", (scan_id, cmd, time.time()))
+
     conn.commit()
     conn.close()
 
     def background_scan(s_id, args):
-        conn = sqlite3.connect('scans.db')
+        conn = sqlite3.connect(DB_PATH)
         c = conn.cursor()
 
         try:
|
|
|
352
488
|
# Ingest results to DB
|
|
353
489
|
ingest_results_to_db(s_id, args.output_dir)
|
|
354
490
|
|
|
491
|
+
# Sweep Logic: Mark any still-pending modules as FAILED
|
|
492
|
+
# This handles cases where containers crashed or produced no output
|
|
493
|
+
c.execute("UPDATE scan_module_status SET status = 'FAILED', error_message = 'Module failed to produce output', updated_at = ? WHERE scan_id = ? AND status IN ('PENDING', 'RUNNING')", (time.time(), s_id))
|
|
494
|
+
conn.commit()
|
|
495
|
+
|
|
355
496
|
c.execute("UPDATE scans SET status = 'completed' WHERE uuid = ?", (s_id,))
|
|
356
497
|
conn.commit()
|
|
357
498
|
except Exception as e:
|
|
@@ -369,7 +510,7 @@ def scan():
 
 @app.route('/status/<scan_id>', methods=['GET'])
 def get_status(scan_id):
-    conn = sqlite3.connect('scans.db')
+    conn = sqlite3.connect(DB_PATH)
     conn.row_factory = sqlite3.Row
     c = conn.cursor()
     c.execute("SELECT * FROM scans WHERE uuid = ?", (scan_id,))
@@ -395,57 +536,218 @@ def list_images():
     except Exception as e:
         return jsonify({"error": str(e)}), 500
 
-@app.route('/
-def
-
-
-
-
-
-
-
-
-
-
+@app.route('/volatility3/plugins', methods=['GET'])
+def list_volatility_plugins():
+    image = request.args.get('image')
+    if not image:
+        return jsonify({"error": "Missing 'image' query parameter"}), 400
+
+    try:
+        script_content = """
+        from volatility3 import framework
+        import volatility3.plugins
+        import json
+        import sys
+
         try:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            failures = framework.import_files(volatility3.plugins, ignore_errors=True)
+            plugins = framework.list_plugins() # dict: {"windows.pslist.PsList": <class ...>, ...}
+
+            output = {
+                "count": len(plugins),
+                "plugins": sorted(list(plugins.keys())),
+                "failures": sorted([str(f) for f in failures]) if failures else []
+            }
+            print(json.dumps(output))
+        except Exception as e:
+            print(json.dumps({"error": str(e)}))
+        """
+        script_content = textwrap.dedent(script_content)
+
+        # Write script to outputs dir so we can mount it
+        output_dir = os.path.join(os.getcwd(), "outputs")
+        if not os.path.exists(output_dir):
+            os.makedirs(output_dir)
+
+        script_path = os.path.join(output_dir, "list_plugins_script.py")
+        with open(script_path, "w") as f:
+            f.write(script_content)
+
+        # Resolve host path for Docker volume
+        host_script_path = resolve_host_path(script_path)
+
+        client = docker.from_env()
+
+        # Run container
+        print(f"[DEBUG] running list_plugins on image {image}")
+        container = client.containers.run(
+            image=image,
+            command="python3 /list_plugins.py",
+            volumes={
+                host_script_path: {'bind': '/list_plugins.py', 'mode': 'ro'}
+            },
+            stderr=True,
+            remove=True
+        )
+
+        # Parse output
+        raw_output = container.decode('utf-8')
+        try:
+            # Output should be mainly JSON
+            lines = raw_output.splitlines()
+            # It might have stderr logs, so look for JSON
+            json_line = None
+            for line in reversed(lines):
+                if line.strip().startswith('{'):
+                    json_line = line
+                    break
+
+            if json_line:
+                data = json.loads(json_line)
+                return jsonify(data)
+            else:
+                return jsonify({"error": "No JSON output found", "raw": raw_output}), 500
+        except:
+            return jsonify({"error": "Failed to parse script output", "raw": raw_output}), 500
+
+    except Exception as e:
+        print(f"[ERROR] List plugins failed: {e}")
+        return jsonify({"error": str(e)}), 500
+
+@app.route('/scan/<uuid>/log', methods=['POST'])
+def log_scan_module_status(uuid):
+    data = request.json
+    if not data:
+        return jsonify({"error": "Missing JSON body"}), 400
+
+    module = data.get('module')
+    status = data.get('status')
+    error = data.get('error')
 
-    if not
-
+    if not module or not status:
+        return jsonify({"error": "Missing module or status"}), 400
+
+    conn = sqlite3.connect(DB_PATH)
+    c = conn.cursor()
+    try:
+        # Upsert status
+        # Check if exists
+        c.execute("SELECT 1 FROM scan_module_status WHERE scan_id = ? AND module = ?", (uuid, module))
+        exists = c.fetchone()
+
+        if exists:
+            c.execute("UPDATE scan_module_status SET status = ?, error_message = ?, updated_at = ? WHERE scan_id = ? AND module = ?",
+                      (status, error, time.time(), uuid, module))
+        else:
+            c.execute("INSERT INTO scan_module_status (scan_id, module, status, error_message, updated_at) VALUES (?, ?, ?, ?, ?)",
+                      (uuid, module, status, error, time.time()))
+
+        conn.commit()
+        return jsonify({"success": True})
+    except Exception as e:
+        print(f"[ERROR] Failed to log status: {e}")
+        return jsonify({"error": str(e)}), 500
+    finally:
+        conn.close()
+
+@app.route('/scan/<uuid>/modules', methods=['GET'])
+def get_scan_modules_status(uuid):
+    conn = sqlite3.connect(DB_PATH)
+    conn.row_factory = sqlite3.Row
+    c = conn.cursor()
 
-
-
-
-
-
-        filename = os.path.basename(f)
-        if filename.endswith("_output.json"):
-            # Validate content
-            parsed_data = clean_and_parse_json(f)
-            if parsed_data and isinstance(parsed_data, dict) and parsed_data.get("error") == "Invalid JSON output":
-                continue
-
-            module_name = filename[:-12]
-            modules.append(module_name)
-    return jsonify({"modules": modules})
+    try:
+        # Get scan output directory
+        c.execute("SELECT output_dir FROM scans WHERE uuid = ?", (uuid,))
+        scan_row = c.fetchone()
+        output_dir = scan_row['output_dir'] if scan_row else None
 
-
+        # Try to get status from the status table
+        c.execute("SELECT module, status, error_message FROM scan_module_status WHERE scan_id = ?", (uuid,))
+        rows = c.fetchall()
+
+        status_list = []
+        docker_client = None
+
+        if rows:
+            for row in rows:
+                mod_dict = dict(row)
+                module_name = mod_dict['module']
+
+                # For PENDING/RUNNING modules, check Docker container status
+                if mod_dict['status'] in ['PENDING', 'RUNNING']:
+                    # Predictable container name: vol3_{scan_id[:8]}_{sanitized_module}
+                    # Must match CLI: re.sub(r'[^a-zA-Z0-9_.-]', '', command)
+                    import re as re_module
+                    sanitized_name = re_module.sub(r'[^a-zA-Z0-9_.-]', '', module_name)
+                    container_name = f"vol3_{uuid[:8]}_{sanitized_name}"
+                    print(f"[DEBUG] Looking for container: {container_name}")
+
+                    try:
+                        if docker_client is None:
+                            import docker
+                            docker_client = docker.from_env()
+
+                        container = docker_client.containers.get(container_name)
+                        container_status = container.status # 'running', 'exited', 'created', etc.
+
+                        if container_status == 'running':
+                            mod_dict['status'] = 'RUNNING'
+                            c.execute("UPDATE scan_module_status SET status = 'RUNNING', updated_at = ? WHERE scan_id = ? AND module = ?",
+                                      (time.time(), uuid, module_name))
+                        elif container_status == 'exited':
+                            # Container finished - file should be ready now
+                            # Read and ingest JSON
+                            if output_dir:
+                                output_file = os.path.join(output_dir, f"{module_name}_output.json")
+                                if os.path.exists(output_file):
+                                    try:
+                                        parsed_data = clean_and_parse_json(output_file)
+                                        content_str = json.dumps(parsed_data) if parsed_data else "{}"
+                                        # Check if result already exists
+                                        c.execute("SELECT id FROM scan_results WHERE scan_id = ? AND module = ?", (uuid, module_name))
+                                        if not c.fetchone():
+                                            c.execute("INSERT INTO scan_results (scan_id, module, content, created_at) VALUES (?, ?, ?, ?)",
+                                                      (uuid, module_name, content_str, time.time()))
+                                    except Exception as e:
+                                        print(f"[ERROR] Failed to ingest {module_name}: {e}")
+
+                            mod_dict['status'] = 'COMPLETED'
+                            c.execute("UPDATE scan_module_status SET status = 'COMPLETED', updated_at = ? WHERE scan_id = ? AND module = ?",
+                                      (time.time(), uuid, module_name))
+
+                            # Clean up container
+                            try:
+                                container.remove()
+                            except Exception as rm_err:
+                                print(f"[WARN] Failed to remove container {container_name}: {rm_err}")
+
+                    except Exception as e:
+                        # Container not found or docker error - leave status as-is
+                        pass
+
+                status_list.append(mod_dict)
+
+            conn.commit()
+        else:
+            # Fallback: check scan_results table for completed modules
+            c.execute("SELECT module FROM scan_results WHERE scan_id = ?", (uuid,))
+            result_rows = c.fetchall()
+            for r in result_rows:
+                status_list.append({
+                    "module": r['module'],
+                    "status": "COMPLETED",
+                    "error_message": None
+                })
+
+        return jsonify(status_list)
+
+    except Exception as e:
+        print(f"[ERROR] Fetching module status: {e}")
+        return jsonify({"error": str(e)}), 500
+    finally:
+        conn.close()
+
 
 @app.route('/results/<uuid>', methods=['GET'])
 def get_scan_results(uuid):
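
Note: the two routes above form a small progress protocol: runners POST per-module transitions to /scan/<uuid>/log, and the UI polls /scan/<uuid>/modules, which additionally reconciles PENDING/RUNNING rows against live containers named vol3_{scan_id[:8]}_{module}. A hedged sketch of both sides (base URL and scan id are placeholders):

    import requests

    BASE = "http://127.0.0.1:5000"  # placeholder
    scan_id = "<scan-uuid>"         # placeholder

    # Runner side: report a transition. Status strings seen in this diff:
    # PENDING, RUNNING, COMPLETED, FAILED.
    requests.post(f"{BASE}/scan/{scan_id}/log",
                  json={"module": "windows.pslist.PsList", "status": "RUNNING"})

    # UI side: poll the aggregate view.
    for mod in requests.get(f"{BASE}/scan/{scan_id}/modules").json():
        print(mod["module"], mod["status"], mod.get("error_message"))
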
@@ -453,7 +755,7 @@ def get_scan_results(uuid):
     if not module_param:
         return jsonify({"error": "Missing 'module' query parameter"}), 400
 
-    conn = sqlite3.connect('scans.db')
+    conn = sqlite3.connect(DB_PATH)
     conn.row_factory = sqlite3.Row
     c = conn.cursor()
 
@@ -503,7 +805,7 @@ def get_scan_results(uuid):
 
 @app.route('/scans', methods=['GET'])
 def list_scans():
-    conn = sqlite3.connect('scans.db')
+    conn = sqlite3.connect(DB_PATH)
     conn.row_factory = sqlite3.Row
     c = conn.cursor()
     c.execute("SELECT * FROM scans ORDER BY created_at DESC")
@@ -537,9 +839,79 @@ def list_scans():
     conn.close()
     return jsonify(scans_list)
 
+@app.route('/scans/<uuid>/execute', methods=['POST'])
+def execute_plugin(uuid):
+    data = request.json
+    module = data.get('module')
+    if not module:
+        return jsonify({"error": "Missing 'module' parameter"}), 400
+
+    conn = sqlite3.connect(DB_PATH)
+    conn.row_factory = sqlite3.Row
+    c = conn.cursor()
+    c.execute("SELECT * FROM scans WHERE uuid = ?", (uuid,))
+    scan = c.fetchone()
+    conn.close()
+
+    if not scan:
+        return jsonify({"error": "Scan not found"}), 404
+
+    # Reconstruct arguments for runner
+    # We use default paths as they are standard in the container
+    default_args = {
+        "profiles_path": os.path.join(os.getcwd(), "volatility2_profiles"),
+        "symbols_path": os.path.join(os.getcwd(), "volatility3_symbols"),
+        "cache_path": os.path.join(os.getcwd(), "volatility3_cache"),
+        "plugins_dir": os.path.join(os.getcwd(), "volatility3_plugins"),
+        "format": "json",
+        "commands": module, # Execute only this module
+        "light": False,
+        "full": False,
+        "linux": False,
+        "windows": False,
+        "mode": scan['volatility_version'], # vol2 or vol3
+        "profile": None, # TODO: Store profile in DB for vol2?
+        "processes": 1,
+        "host_path": os.environ.get("HOST_PATH"),
+        "debug": True,
+        "fetch_symbol": False,
+        "custom_symbol": None, # TODO: Store custom symbol in DB?
+        "dump": scan['dump_path'],
+        "image": scan['image'],
+        "output_dir": scan['output_dir']
+    }
+
+    # Set OS flags based on DB
+    if scan['os'] == 'linux':
+        default_args['linux'] = True
+        default_args['fetch_symbol'] = True # Default for Linux
+    elif scan['os'] == 'windows':
+        default_args['windows'] = True
+
+    args_obj = argparse.Namespace(**default_args)
+    args_obj.scan_id = uuid # Add scan_id for tracking
+
+    def background_single_run(s_id, args):
+        try:
+            # Just run it
+            if runner_func:
+                print(f"[DEBUG] Executing manual plugin {args.commands} on {s_id}")
+                runner_func(args)
+
+                # Ingest
+                ingest_results_to_db(s_id, args.output_dir)
+        except Exception as e:
+            print(f"[ERROR] Manual plugin execution failed: {e}")
+
+    thread = threading.Thread(target=background_single_run, args=(uuid, args_obj))
+    thread.daemon = True
+    thread.start()
+
+    return jsonify({"status": "started", "module": module})
+
 @app.route('/stats', methods=['GET'])
 def get_stats():
-    conn = sqlite3.connect('scans.db')
+    conn = sqlite3.connect(DB_PATH)
     c = conn.cursor()
     c.execute("SELECT COUNT(*) FROM scans")
     total_cases = c.fetchone()[0]
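
Note: /scans/<uuid>/execute re-runs a single module against an existing scan: the handler rebuilds an argparse.Namespace from the scans row and hands it to runner_func in a daemon thread, then ingests the output. A hedged client sketch (placeholders as before):

    import requests

    BASE = "http://127.0.0.1:5000"  # placeholder
    scan_id = "<scan-uuid>"         # placeholder

    resp = requests.post(f"{BASE}/scans/{scan_id}/execute",
                         json={"module": "windows.netscan.NetScan"})
    print(resp.json())  # {"status": "started", "module": "..."}
    # Completion shows up later via /scan/<uuid>/modules and /results/<uuid>.
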
@@ -579,6 +951,9 @@ def list_evidences():
 
     try:
         items = os.listdir(STORAGE_DIR)
+        # Robustly filter system files immediately
+        items = [i for i in items if not (i.startswith("scans.db") or i.endswith(".sha256"))]
+
         print(f"[DEBUG] list_evidences found {len(items)} items in {STORAGE_DIR}")
         print(f"[DEBUG] Items: {items}")
     except FileNotFoundError:
@@ -588,10 +963,10 @@ def list_evidences():
     # Pre-load Case Name map from DB
     case_map = {} # filename -> case name
     try:
-        conn = sqlite3.connect('scans.db')
+        conn = sqlite3.connect(DB_PATH)
         conn.row_factory = sqlite3.Row
         c = conn.cursor()
-        c.execute("SELECT name, dump_path FROM scans")
+        c.execute("SELECT name, dump_path FROM scans ORDER BY created_at ASC")
         rows = c.fetchall()
         for r in rows:
             if r['name'] and r['dump_path']:
@@ -609,6 +984,10 @@ def list_evidences():
 
 
     for item in items:
+        # Filter out system files
+        if item.startswith("scans.db") or item.endswith(".sha256"):
+            continue
+
         path = os.path.join(STORAGE_DIR, item)
         if os.path.isdir(path) and item.endswith("_extracted"):
             # This is an extracted folder
@@ -640,7 +1019,7 @@ def list_evidences():
             # Case Name match attempt
             matched_case_name = dump_base
             try:
-                conn = sqlite3.connect('scans.db')
+                conn = sqlite3.connect(DB_PATH)
                 conn.row_factory = sqlite3.Row
                 c = conn.cursor()
                 c.execute("SELECT dump_path FROM scans WHERE name = ? ORDER BY created_at DESC LIMIT 1", (dump_base,))
@@ -687,7 +1066,7 @@ def list_evidences():
                 continue
 
         # Resolve Display Name (Case Name)
-        display_name = case_map.get(item,
+        display_name = case_map.get(item, "Unassigned Evidence")
 
         # WRAP IN GROUP to ensure Folder Style
         child_file = {
@@ -745,6 +1124,10 @@ def get_file_hash(filepath):
 
 @app.route('/evidence/<filename>', methods=['DELETE'])
 def delete_evidence(filename):
+    # Strip virtual group prefix if present
+    if filename.startswith("group_"):
+        filename = filename[6:]
+
     filename = secure_filename(filename)
     path = os.path.join(STORAGE_DIR, filename)
     if os.path.exists(path):
@@ -756,7 +1139,7 @@ def delete_evidence(filename):
         os.remove(path)
         # Remove sidecar hash if exists
         if os.path.exists(path + ".sha256"):
-
+            os.remove(path + ".sha256")
 
         # Also remove extracted directory (if this was a dump file)
         # Checks for standard <filename>_extracted pattern
@@ -780,7 +1163,7 @@ def download_evidence(filename):
 def cleanup_timeouts():
     """Marks scans running for > 1 hour as failed (timeout)."""
     try:
-        conn = sqlite3.connect('scans.db')
+        conn = sqlite3.connect(DB_PATH)
         c = conn.cursor()
         one_hour_ago = time.time() - 3600
 
@@ -797,148 +1180,248 @@ def cleanup_timeouts():
     except Exception as e:
         print(f"Error cleaning up timeouts: {e}")
 
-
-
-
-
-
+
+# In-memory dictionary to store dump task statuses
+# NOTE: This is a temporary solution for the provided diff.
+# A more robust solution would persist this in the database.
+dump_tasks = {}
+
+def background_dump_task(task_id, scan, virt_addr, image_tag, file_path=None):
+    """
+    Executes a Volatility3 dump command.
+    Windows: uses windows.dumpfiles.DumpFiles --virtaddr
+    Linux: uses linux.pagecache.Files --find <path> --dump
+    """
+    print(f"[{task_id}] DEBUG: Starting background dump task for scan: {scan['uuid']}")
+    dump_tasks[task_id] = {'status': 'running'}
 
     try:
-
-
-
-        #
-
-        scan = c.fetchone()
+        # We need the memory dump file name on host
+        # The 'scan' row has 'filepath'.
+        # But wait, that filepath is the UPLOADED path (e.g. /app/storage/uploads/...)
+        # We need the filename to point to /dump_dir inside container.
+        uploaded_path = scan['dump_path'] # Changed from 'filepath' to 'dump_path' to match original scan structure
 
-
-
-
-        dump_path = scan['dump_path']
-        if not os.path.isabs(dump_path) and not dump_path.startswith('/'):
-            dump_path = os.path.join(STORAGE_DIR, dump_path)
+        print(f"[{task_id}] DEBUG: Raw uploaded_path: {uploaded_path}")
+        if not os.path.isabs(uploaded_path) and not uploaded_path.startswith('/'):
+            uploaded_path = os.path.join(STORAGE_DIR, uploaded_path)
 
-
-
-        # Ensure scan output dir exists
-        if not scan_output_dir or not os.path.exists(scan_output_dir):
-            scan_output_dir = os.path.join(os.getcwd(), "outputs", f"volatility3_{scan_id}")
-            os.makedirs(scan_output_dir, exist_ok=True)
-
-        target_output_dir = os.path.join(scan_output_dir, "downloads", task_id)
-        os.makedirs(target_output_dir, exist_ok=True)
-
-        # 3. Resolve Paths & Volumes
-        host_path = os.environ.get("HOST_PATH")
-        def resolve(p):
-            if host_path:
-                if p.startswith(os.getcwd()):
-                    rel = os.path.relpath(p, os.getcwd())
-                    return os.path.join(host_path, rel)
-                if p.startswith("/storage"):
-                    return os.path.join(host_path, "storage", "data", os.path.relpath(p, "/storage"))
-            return p
-
-        abs_dump_path = os.path.abspath(dump_path)
-        abs_dump_dir = os.path.dirname(abs_dump_path)
-        dump_filename = os.path.basename(abs_dump_path)
-
-        symbols_path = os.path.join(os.getcwd(), "volatility3_symbols")
-        cache_path = os.path.join(os.getcwd(), "volatility3_cache")
-        plugins_path = os.path.join(os.getcwd(), "volatility3_plugins")
+        print(f"[{task_id}] DEBUG: Resolved uploaded_path: {uploaded_path}")
+        dump_filename = os.path.basename(uploaded_path)
 
-
-
-            resolve(target_output_dir): {'bind': '/output', 'mode': 'rw'},
-            resolve(symbols_path): {'bind': '/symbols', 'mode': 'rw'},
-            resolve(cache_path): {'bind': '/root/.cache/volatility3', 'mode': 'rw'},
-            resolve(plugins_path): {'bind': '/plugins', 'mode': 'ro'}
-        }
+        # Construct basic command
+        cmd = ["vol", "-q", "-f", f"/dump_dir/{dump_filename}", "-o", "/output"]
 
-        #
-
-
-
-
-
-
-
-        ]
-
-        print(f"[DEBUG] [Task {task_id}] Running: {cmd}")
-
-        client.containers.run(
-            image=docker_image,
-            command=cmd,
-            volumes=volumes,
-            remove=True,
-            stderr=True,
-            stdout=True
-        ) # This blocks until completion
+        # Parse Config
+        config = {}
+        # scan is sqlite3.Row, supports key access
+        if 'config_json' in scan.keys() and scan['config_json']:
+            try:
+                config = json.loads(scan['config_json'])
+            except:
+                print(f"[{task_id}] WARN: Failed to parse config_json")
 
-        #
-
-
+        # ISF Handling (Must be generally available or before plugin)
+        # multi_volatility3.py puts it before command.
+        if scan['os'] == 'linux' and config.get('fetch_symbol'):
+            print(f"[{task_id}] DEBUG: Enabling remote ISF URL based on scan config")
+            cmd.extend(["--remote-isf-url", "https://github.com/Abyss-W4tcher/volatility3-symbols/raw/master/banners/banners.json"])
+
+        # Explicitly set symbols path if not using ISF?
+        # Actually vol defaults to checking standard paths, and we mount /symbols.
+        # But we should probably add -s /symbols for clarity/correctness if not using ISF?
+        # Actually, multi_volatility3 DOES add -s /symbols ALWAYS.
+        cmd.extend(["-s", "/symbols"])
+
+
+        # Determine plugin based on OS
+        print(f"[{task_id}] DEBUG: Scan OS: {scan['os']}")
+        if scan['os'] == 'linux':
+            # Linux dump logic: vol -f ... linux.pagecache.Files --find {path_name} --dump
+            if not file_path:
+                raise Exception("File Path is required for Linux dumps")
+
+            print(f"[{task_id}] DEBUG: Linux Dump - FilePath: {file_path}")
+            cmd.append("linux.pagecache.Files")
+            cmd.append("--find")
+            cmd.append(file_path)
+            cmd.append("--dump")
+
+            # Symbols / ISF Handling
+            # We assume if it's Linux we might need the ISF URL if local symbols aren't enough.
+            # Per user request: "use the /symbols folder ... OR the ISF link".
+            # The docker container has /symbols
 
-        for f in files:
-            if not f.endswith(".json") and f != "." and f != "..":
-                target_file = f
-                break
-
-        if not target_file:
-            raise Exception("No file extracted (DumpFiles returned no candidate)")
-
-        # Organize downloads in STORAGE_DIR / <CaseName_or_DumpName>_extracted / <target_file>
-        # Use Case Name if available, otherwise dump filename
-        case_name = scan['name']
-        if case_name:
-            # Sanitize case name for folder usage
-            safe_case_name = secure_filename(case_name)
-            extracted_dir_name = f"{safe_case_name}_extracted"
         else:
-
-
-
-
+            # Windows dump logic (default)
+            cmd.append("windows.dumpfiles.DumpFiles")
+            cmd.append("--virtaddr")
+            cmd.append(str(virt_addr))
+
+        print(f"[{task_id}] Running dump command inside container: {cmd}")
+
+        # Run Docker
+        # We must mount:
+        # STORAGE_DIR/uploads -> /dump_dir
+        # STORAGE_DIR/<ScanID>_extracted (or temp) -> /output
+        # STORAGE_DIR/symbols -> /symbols
+        # STORAGE_DIR/cache -> /root/.cache/volatility3
 
-
+        # Output dir:
+        # We'll create a specific folder for this extraction or just use the common one.
+        # Let's use a temp dir for the dump, then move the file.
+        case_name = scan['name'] # Changed from 'case_name' to 'name' to match original scan structure
+        case_extract_dir = os.path.join(STORAGE_DIR, f"{case_name}_extracted")
+        if not os.path.exists(case_extract_dir):
+            os.makedirs(case_extract_dir)
+
+        # We'll map the HOST path to /output.
+        # Actually, simpler to map case_extract_dir to /output.
+        # BUT, volatility output filenames usually include virtaddr or pid.
+        # We want to identify the file we just dumped.
 
-        #
-
-
+        # Let's use a temporary directory for THIS task
+        task_out_dir = os.path.join(STORAGE_DIR, f"task_{task_id}")
+        if not os.path.exists(task_out_dir):
+            os.makedirs(task_out_dir)
+
+        # Retrieve Docker Image
+        # If image_tag is provided, use it. Else use default
+        # Check if local build?
+        # The frontend sends 'image' from caseDetails.
 
-        #
-
-
-
+        # Prepare Volumes using STORAGE_DIR
+        symbols_path = os.path.join(STORAGE_DIR, 'symbols')
+        cache_path = os.path.join(STORAGE_DIR, 'cache')
+
+        # Ensure directories exist
+        os.makedirs(symbols_path, exist_ok=True)
+        os.makedirs(cache_path, exist_ok=True)
 
+        # Docker Volumes Mapping
+        volumes = {
+            os.path.dirname(uploaded_path): {'bind': '/dump_dir', 'mode': 'ro'},
+            task_out_dir: {'bind': '/output', 'mode': 'rw'},
+            symbols_path: {'bind': '/symbols', 'mode': 'ro'},
+            cache_path: {'bind': '/root/.cache/volatility3', 'mode': 'rw'}
+        }
+
+        print(f"[{task_id}] Launching Docker container: {image_tag}")
+        print(f"[{task_id}] Volumes config: {volumes}")
+
+        # Consistent naming for debug
+        # Format: vol3_dump_<short_scan_uuid>_<task_id>
+        # Scan UUID is text, sanitize just in case
+        safe_scan_id = re.sub(r'[^a-zA-Z0-9]', '', scan['uuid'])[:8]
+        container_name = f"vol3_dump_{safe_scan_id}_{task_id}"
+
+        try:
+            client = docker.from_env()
+            container = client.containers.run(
+                image=image_tag,
+                name=container_name,
+                command=cmd,
+                volumes=volumes,
+                remove=True,
+                detach=False, # Wait for completion
+                stderr=True,
+                stdout=True
+            )
+            print(f"[{task_id}] Docker finished. Output bytes: {len(container) if container else 0}")
+            # print(f"[{task_id}] Container output (first 200 chars): {container[:200] if container else ''}")
+        except docker.errors.ImageNotFound:
+            print(f"[{task_id}] Pulling image {image_tag}...")
+            client.images.pull(image_tag)
+            container = client.containers.run(
+                image=image_tag,
+                name=container_name,
+                command=cmd,
+                volumes=volumes,
+                remove=True,
+                detach=False
+            )
+        except Exception as e:
+            print(f"[{task_id}] CRITICAL DOCKER ERROR: {e}")
+            raise Exception(f"Docker execution failed: {e}")
+
+        # After run, check files in task_out_dir
+        files = os.listdir(task_out_dir)
+        print(f"[{task_id}] Files in output dir: {files}")
+        if not files:
+            raise Exception("No file extracted by Volatility plugin.")
+
+        # Move files to final destination (case_extract_dir)
+        # And maybe rename?
+        created_files = []
+        for f in files:
+            src = os.path.join(task_out_dir, f)
+            dst = os.path.join(case_extract_dir, f)
+            shutil.move(src, dst)
+            created_files.append(f)
+
+        # Cleanup
+        os.rmdir(task_out_dir)
+
+        # Update DB/Task status
+        # Since we use simple dict for now:
+        dump_tasks[task_id]['status'] = 'completed'
+        dump_tasks[task_id]['output_path'] = f"/evidence/{created_files[0]}/download" # Basic assumption
+        print(f"[{task_id}] Task completed successfully. Output: {dump_tasks[task_id]['output_path']}")
+
+        # Ideally, update DB if we were using it for dump_tasks (we only insert PENDING, but never update status in DB in this code?)
+        # Ah, the previous code had DB update logic but I removed it/it's not in this snippet.
+        # Let's add basic DB update so status persists if backend restarts?
+        # For now, memory dict is what endpoint checks.
+
     except Exception as e:
-        print(f"[
-
-
+        print(f"[{task_id}] TASK FAILED: {str(e)}")
+        import traceback
+        traceback.print_exc()
+        dump_tasks[task_id]['status'] = 'failed'
+        dump_tasks[task_id]['error'] = str(e)
     finally:
+        # The original code updated the database, but the provided diff removes this.
+        # Keeping the database update for consistency with other functions.
+        conn = sqlite3.connect(DB_PATH)
+        c = conn.cursor()
+        if dump_tasks[task_id]['status'] == 'completed':
+            output_path = os.path.join(case_extract_dir, created_files[0]) if created_files else None
+            c.execute("UPDATE dump_tasks SET status = 'completed', output_path = ? WHERE task_id = ?", (output_path, task_id))
+        else:
+            error_msg = dump_tasks[task_id].get('error', 'Unknown error')
+            c.execute("UPDATE dump_tasks SET status = 'failed', error = ? WHERE task_id = ?", (error_msg, task_id))
+        conn.commit()
         conn.close()
 
+
 @app.route('/scan/<scan_id>/dump-file', methods=['POST'])
 def dump_file_from_memory(scan_id):
-
-
-    docker_image = data.get('image')
-
-    if not virt_addr:
-        return jsonify({"error": "Missing 'virt_addr'"}), 400
-    if not docker_image:
-        return jsonify({"error": "Missing 'image'"}), 400
-
-    conn = sqlite3.connect('scans.db')
+    # Use standard connection method
+    conn = sqlite3.connect(DB_PATH)
     conn.row_factory = sqlite3.Row
     c = conn.cursor()
-    c.execute("SELECT * FROM scans WHERE uuid = ?", (scan_id,))
-    scan = c.fetchone()
 
+    c.execute('SELECT * FROM scans WHERE uuid = ?', (scan_id,))
+    scan = c.fetchone()
+
     if not scan:
         conn.close()
-        return jsonify({
+        return jsonify({'error': 'Scan not found'}), 404
+
+    data = request.json
+
+    # Determine default image fallback based on Volatility version
+    default_image = "sp00kyskelet0n/volatility3"
+    if scan['volatility_version'] == "2":
+        default_image = "sp00kyskelet0n/volatility2"
+
+    virt_addr = data.get('virt_addr')
+    image = data.get('image') or scan['image'] or default_image
+    file_path = data.get('file_path')
+
+    if not virt_addr and not file_path:
+        conn.close()
+        return jsonify({'error': 'Virtual address or File Path required'}), 400
 
     # Create Task
     task_id = str(uuid.uuid4())
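
Note: background_dump_task assembles one of two `vol` invocations depending on the scanned OS. Reduced to the final argument lists it builds (the dump filename, virtual address, and target path below are placeholders):

    # Windows: dump a cached file object by virtual address.
    win_cmd = ["vol", "-q", "-f", "/dump_dir/memdump.raw", "-o", "/output",
               "-s", "/symbols",
               "windows.dumpfiles.DumpFiles", "--virtaddr", "0xfa8001234560"]

    # Linux: find the file in the page cache by path and dump it; the remote
    # ISF URL is prepended only when the scan was configured with fetch_symbol.
    lin_cmd = ["vol", "-q", "-f", "/dump_dir/memdump.raw", "-o", "/output",
               "--remote-isf-url",
               "https://github.com/Abyss-W4tcher/volatility3-symbols/raw/master/banners/banners.json",
               "-s", "/symbols",
               "linux.pagecache.Files", "--find", "/etc/shadow", "--dump"]
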
@@ -947,8 +1430,12 @@ def dump_file_from_memory(scan_id):
     conn.commit()
     conn.close()
 
+    # Convert scan row to dict to pass to thread safely
+    scan_dict = dict(scan)
+
     # Start Background Thread
-
+    # Signature: background_dump_task(task_id, scan, virt_addr, image_tag, file_path=None)
+    thread = threading.Thread(target=background_dump_task, args=(task_id, scan_dict, virt_addr, image, file_path))
     thread.daemon = True
     thread.start()
 
|
|
|
956
1443
|
|
|
957
1444
|
@app.route('/dump-task/<task_id>', methods=['GET'])
|
|
958
1445
|
def get_dump_status(task_id):
|
|
959
|
-
conn = sqlite3.connect(
|
|
1446
|
+
conn = sqlite3.connect(DB_PATH)
|
|
960
1447
|
conn.row_factory = sqlite3.Row
|
|
961
1448
|
c = conn.cursor()
|
|
962
1449
|
c.execute("SELECT * FROM dump_tasks WHERE task_id = ?", (task_id,))
|
|
@@ -970,7 +1457,7 @@ def get_dump_status(task_id):
 
 @app.route('/dump-task/<task_id>/download', methods=['GET'])
 def download_dump_result(task_id):
-    conn = sqlite3.connect('scans.db')
+    conn = sqlite3.connect(DB_PATH)
     conn.row_factory = sqlite3.Row
     c = conn.cursor()
     c.execute("SELECT * FROM dump_tasks WHERE task_id = ?", (task_id,))