FlowAnalyzer 0.4.3__tar.gz → 0.4.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,305 @@
1
+ import os
2
+ import sqlite3
3
+ import subprocess
4
+ from concurrent.futures import ThreadPoolExecutor
5
+ from typing import Iterable, Optional
6
+
7
+ from .logging_config import logger
8
+ from .Models import HttpPair, Request, Response
9
+ from .PacketParser import PacketParser
10
+ from .Path import get_default_tshark_path
11
+
12
+
13
+ class FlowAnalyzer:
14
+ """
15
+ FlowAnalyzer traffic analyzer (smart-cache edition)
16
+ Features:
17
+ 1. Tshark -> Pipe -> ThreadPool -> SQLite
18
+ 2. Smart validation: the display filter and the capture file's modification time are compared automatically to prevent stale caches
19
+ 3. Storage layout: the cache database is created in the same directory as the capture file
20
+ """
21
+
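A minimal usage sketch of the workflow the docstring describes; the pcap name and filter are placeholders, and the import uses the module path as laid out in this package:

```python
# Hypothetical end-to-end sketch ("traffic.pcapng" and the filter are placeholders).
from FlowAnalyzer.FlowAnalyzer import FlowAnalyzer

# First call parses with tshark and writes traffic.db next to the pcap;
# later calls with the same filter and an unchanged file reuse that cache.
db_path = FlowAnalyzer.get_json_data("traffic.pcapng", display_filter="http")

for pair in FlowAnalyzer(db_path).generate_http_dict_pairs():
    if pair.request is not None:
        print(pair.request.frame_num, pair.request.full_uri)
```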
22
+ def __init__(self, db_path: str):
23
+ """
24
+ Initialize the FlowAnalyzer
25
+ :param db_path: path to the database file (as returned by get_json_data)
26
+ """
27
+ # Backwards-compatible path handling
28
+ if db_path.endswith(".json"):
29
+ possible_db = db_path + ".db"
30
+ if os.path.exists(possible_db):
31
+ self.db_path = possible_db
32
+ else:
33
+ self.db_path = db_path
34
+ else:
35
+ self.db_path = db_path
36
+
37
+ self.check_db_file()
38
+
39
+ def check_db_file(self):
40
+ """检查数据库文件是否存在"""
41
+ if not os.path.exists(self.db_path):
42
+ raise FileNotFoundError(f"未找到数据文件或缓存数据库: {self.db_path},请先调用 get_json_data 生成。")
43
+
44
+ def generate_http_dict_pairs(self) -> Iterable[HttpPair]:
45
+ """生成HTTP请求和响应信息的字典对 (SQL JOIN 高性能版)"""
46
+ if not os.path.exists(self.db_path):
47
+ return
48
+
49
+ with sqlite3.connect(self.db_path) as conn:
50
+ cursor = conn.cursor()
51
+ # Open this connection in read-only mode
52
+ cursor.execute("PRAGMA query_only = 1;")
53
+
54
+ # === Step 1: paired query ===
55
+ # Use SQLite's LEFT JOIN to match requests directly with their responses
56
+ sql_pair = """
57
+ SELECT
58
+ req.frame_num, req.header, req.file_data, req.full_uri, req.time_epoch, -- 0-4 (Request)
59
+ resp.frame_num, resp.header, resp.file_data, resp.time_epoch, resp.request_in -- 5-9 (Response)
60
+ FROM requests req
61
+ LEFT JOIN responses resp ON req.frame_num = resp.request_in
62
+ ORDER BY req.frame_num ASC
63
+ """
64
+
65
+ cursor.execute(sql_pair)
66
+
67
+ # Stream over the result set; memory usage stays very low
68
+ for row in cursor:
69
+ req = Request(frame_num=row[0], header=row[1] or b"", file_data=row[2] or b"", full_uri=row[3] or "", time_epoch=row[4])
70
+
71
+ resp = None
72
+ if row[5] is not None:
73
+ resp = Response(frame_num=row[5], header=row[6] or b"", file_data=row[7] or b"", time_epoch=row[8], _request_in=row[9])
74
+
75
+ yield HttpPair(request=req, response=resp)
76
+
77
+ # === Step 2: orphan responses ===
78
+ sql_orphan = """
79
+ SELECT frame_num, header, file_data, time_epoch, request_in
80
+ FROM responses
81
+ WHERE request_in NOT IN (SELECT frame_num FROM requests)
82
+ """
83
+ cursor.execute(sql_orphan)
84
+
85
+ for row in cursor:
86
+ resp = Response(frame_num=row[0], header=row[1] or b"", file_data=row[2] or b"", time_epoch=row[3], _request_in=row[4])
87
+ yield HttpPair(request=None, response=resp)
88
+
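As a rough illustration of what the generator yields (paired, unanswered-request, and orphan-response cases), assuming a cache database has already been built:

```python
# Sketch: classify the yielded HttpPair objects (db_path is assumed to exist).
analyzer = FlowAnalyzer(db_path)
for pair in analyzer.generate_http_dict_pairs():
    if pair.request and pair.response:
        rtt = pair.response.time_epoch - pair.request.time_epoch
        print(f"#{pair.request.frame_num} -> #{pair.response.frame_num} ({rtt:.3f}s)")
    elif pair.request:
        print(f"#{pair.request.frame_num} got no response")
    else:  # orphan response whose request was filtered out or not captured
        print(f"orphan response #{pair.response.frame_num}")
```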
89
+ # =========================================================================
90
+ # Static methods: cache validation and streaming ingestion
91
+ # =========================================================================
92
+
93
+ @staticmethod
94
+ def get_json_data(file_path: str, display_filter: str, tshark_path: Optional[str] = None) -> str:
95
+ """
96
+ Return the data (database) path, with smart cache validation.
97
+ """
98
+ if not os.path.exists(file_path):
99
+ raise FileNotFoundError("流量包路径不存在:%s" % file_path)
100
+
101
+ abs_file_path = os.path.abspath(file_path)
102
+ pcap_dir = os.path.dirname(abs_file_path)
103
+ base_name = os.path.splitext(os.path.basename(abs_file_path))[0]
104
+ db_path = os.path.join(pcap_dir, f"{base_name}.db")
105
+
106
+ if FlowAnalyzer._is_cache_valid(db_path, abs_file_path, display_filter):
107
+ logger.debug(f"缓存校验通过 (Filter匹配且文件未变),使用缓存: [{db_path}]")
108
+ return db_path
109
+ else:
110
+ logger.debug(f"缓存失效或不存在 (Filter变更或文件更新),开始重新解析...")
111
+
112
+ tshark_path = FlowAnalyzer.get_tshark_path(tshark_path)
113
+ FlowAnalyzer._stream_tshark_to_db(abs_file_path, display_filter, tshark_path, db_path)
114
+
115
+ return db_path
116
+
117
+ @staticmethod
118
+ def get_db_data(file_path: str, display_filter: str, tshark_path: Optional[str] = None) -> str:
119
+ return FlowAnalyzer.get_json_data(file_path, display_filter, tshark_path)
120
+
121
+ @staticmethod
122
+ def _is_cache_valid(db_path: str, pcap_path: str, current_filter: str) -> bool:
123
+ if not os.path.exists(db_path) or os.path.getsize(db_path) == 0:
124
+ return False
125
+
126
+ try:
127
+ current_mtime = os.path.getmtime(pcap_path)
128
+ current_size = os.path.getsize(pcap_path)
129
+
130
+ with sqlite3.connect(db_path) as conn:
131
+ cursor = conn.cursor()
132
+ cursor.execute("SELECT filter, pcap_mtime, pcap_size FROM meta_info LIMIT 1")
133
+ row = cursor.fetchone()
134
+
135
+ if not row:
136
+ return False
137
+
138
+ cached_filter, cached_mtime, cached_size = row
139
+
140
+ if cached_filter == current_filter and cached_size == current_size and abs(cached_mtime - current_mtime) < 0.1:
141
+ return True
142
+ else:
143
+ logger.debug(f"校验失败: 缓存Filter={cached_filter} vs 当前={current_filter}")
144
+ return False
145
+
146
+ except sqlite3.OperationalError:
147
+ return False
148
+ except Exception as e:
149
+ logger.warning(f"缓存校验出错: {e},将重新解析")
150
+ return False
151
+
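If a cache keeps getting invalidated, the metadata that `_is_cache_valid` compares against can be inspected directly; a small sketch (the `.db` path is a placeholder):

```python
import sqlite3

# Sketch: dump the cached filter and pcap fingerprint stored in meta_info.
with sqlite3.connect("traffic.db") as conn:
    cached_filter, mtime, size = conn.execute(
        "SELECT filter, pcap_mtime, pcap_size FROM meta_info LIMIT 1"
    ).fetchone()
    print(cached_filter, mtime, size)
```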
152
+ @staticmethod
153
+ def _stream_tshark_to_db(pcap_path: str, display_filter: str, tshark_path: str, db_path: str):
154
+ """流式解析并存入DB (多线程版)"""
155
+ if os.path.exists(db_path):
156
+ os.remove(db_path)
157
+
158
+ with sqlite3.connect(db_path) as conn:
159
+ cursor = conn.cursor()
160
+ cursor.execute("PRAGMA synchronous = OFF")
161
+ cursor.execute("PRAGMA journal_mode = MEMORY")
162
+
163
+ cursor.execute("CREATE TABLE requests (frame_num INTEGER PRIMARY KEY, header BLOB, file_data BLOB, full_uri TEXT, time_epoch REAL)")
164
+ cursor.execute("CREATE TABLE responses (frame_num INTEGER PRIMARY KEY, header BLOB, file_data BLOB, time_epoch REAL, request_in INTEGER)")
165
+
166
+ cursor.execute("""
167
+ CREATE TABLE meta_info (
168
+ id INTEGER PRIMARY KEY,
169
+ filter TEXT,
170
+ pcap_path TEXT,
171
+ pcap_mtime REAL,
172
+ pcap_size INTEGER
173
+ )
174
+ """)
175
+ conn.commit()
176
+
177
+ command = [
178
+ tshark_path,
179
+ "-r",
180
+ pcap_path,
181
+ "-Y",
182
+ f"({display_filter})",
183
+ "-T",
184
+ "fields",
185
+ "-e",
186
+ "http.response.code", # 0
187
+ "-e",
188
+ "http.request_in", # 1
189
+ "-e",
190
+ "tcp.reassembled.data", # 2
191
+ "-e",
192
+ "frame.number", # 3
193
+ "-e",
194
+ "tcp.payload", # 4
195
+ "-e",
196
+ "frame.time_epoch", # 5
197
+ "-e",
198
+ "exported_pdu.exported_pdu", # 6
199
+ "-e",
200
+ "http.request.full_uri", # 7
201
+ "-e",
202
+ "http.file_data", # 8
203
+ "-e",
204
+ "tcp.segment.count", # 9
205
+ "-E",
206
+ "header=n",
207
+ "-E",
208
+ "separator=/t",
209
+ "-E",
210
+ "quote=n",
211
+ "-E",
212
+ "occurrence=f",
213
+ ]
214
+
215
+ logger.debug(f"执行 Tshark: {command}")
216
+ BATCH_SIZE = 2000
217
+ MAX_PENDING_BATCHES = 20 # Cap on the number of in-flight batches held in memory (backpressure)
218
+
219
+ # Parse data in parallel with a ThreadPoolExecutor
220
+ max_workers = min(32, (os.cpu_count() or 1) + 4)
221
+
222
+ process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=os.path.dirname(os.path.abspath(pcap_path)))
223
+ try:
224
+ with sqlite3.connect(db_path) as conn:
225
+ cursor = conn.cursor()
226
+
227
+ with ThreadPoolExecutor(max_workers=max_workers) as executor:
228
+ current_batch = []
229
+ pending_futures = [] # List[Future]
230
+
231
+ def write_results_to_db(results):
232
+ """将一批处理好的结果写入数据库"""
233
+ if not results:
234
+ return
235
+
236
+ db_req_rows = []
237
+ db_resp_rows = []
238
+
239
+ for item in results:
240
+ if item["type"] == "response":
241
+ db_resp_rows.append((item["frame_num"], item["header"], item["file_data"], item["time_epoch"], item["request_in"]))
242
+ else:
243
+ db_req_rows.append((item["frame_num"], item["header"], item["file_data"], item["full_uri"], item["time_epoch"]))
244
+
245
+ if db_req_rows:
246
+ cursor.executemany("INSERT OR REPLACE INTO requests VALUES (?,?,?,?,?)", db_req_rows)
247
+ if db_resp_rows:
248
+ cursor.executemany("INSERT OR REPLACE INTO responses VALUES (?,?,?,?,?)", db_resp_rows)
249
+
250
+ def submit_batch():
251
+ """提交当前批次到线程池"""
252
+ if not current_batch:
253
+ return
254
+
255
+ # Copy batch data for the thread (list slicing is fast)
256
+ batch_data = current_batch[:]
257
+ future = executor.submit(PacketParser.process_batch, batch_data)
258
+ pending_futures.append(future)
259
+ current_batch.clear()
260
+
261
+ # --- Main Pipeline Loop ---
262
+ if process.stdout:
263
+ for line in process.stdout:
264
+ current_batch.append(line)
265
+
266
+ if len(current_batch) >= BATCH_SIZE:
267
+ submit_batch()
268
+
269
+ # Backpressure: if too many batches are pending, pause reading in the main thread and drain the oldest one first
270
+ # This keeps the pipeline flowing while preventing memory from blowing up
271
+ if len(pending_futures) >= MAX_PENDING_BATCHES:
272
+ oldest_future = pending_futures.pop(0)
273
+ write_results_to_db(oldest_future.result())
274
+
275
+ # --- Drain Pipeline ---
276
+ # Submit any remaining lines
277
+ submit_batch()
278
+
279
+ # Wait for all outstanding batches to finish
280
+ for future in pending_futures:
281
+ write_results_to_db(future.result())
282
+
283
+ # Create the index and write the metadata
284
+ cursor.execute("CREATE INDEX idx_resp_req_in ON responses(request_in)")
285
+ pcap_mtime = os.path.getmtime(pcap_path)
286
+ pcap_size = os.path.getsize(pcap_path)
287
+ cursor.execute("INSERT INTO meta_info (filter, pcap_path, pcap_mtime, pcap_size) VALUES (?, ?, ?, ?)", (display_filter, pcap_path, pcap_mtime, pcap_size))
288
+ conn.commit()
289
+
290
+ except Exception as e:
291
+ logger.error(f"解析错误: {e}")
292
+ if process.poll() is None:
293
+ process.terminate()
294
+ finally:
295
+ if process.poll() is None:
296
+ process.terminate()
297
+
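The batching-plus-backpressure idea used above, reduced to a standalone sketch with generic names (not part of the package); it shows the pattern of submitting fixed-size batches to a pool while never letting more than a bounded number of results pile up:

```python
# Sketch of a bounded producer/consumer pipeline with ThreadPoolExecutor.
from concurrent.futures import ThreadPoolExecutor

MAX_PENDING = 20

def parse_batch(batch):  # stand-in for a worker such as PacketParser.process_batch
    return [item.upper() for item in batch]

def pipeline(lines, batch_size=2000):
    with ThreadPoolExecutor() as pool:
        pending, batch = [], []
        for line in lines:
            batch.append(line)
            if len(batch) >= batch_size:
                pending.append(pool.submit(parse_batch, batch[:]))
                batch.clear()
                if len(pending) >= MAX_PENDING:
                    yield from pending.pop(0).result()  # drain the oldest batch first
        if batch:
            pending.append(pool.submit(parse_batch, batch[:]))
        for fut in pending:  # drain whatever is left
            yield from fut.result()
```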
298
+ @staticmethod
299
+ def get_tshark_path(tshark_path: Optional[str]) -> str:
300
+ default_tshark_path = get_default_tshark_path()
301
+ use_path = tshark_path if tshark_path and os.path.exists(tshark_path) else default_tshark_path
302
+ if not use_path or not os.path.exists(use_path):
303
+ logger.critical("未找到 Tshark,请检查路径配置")
304
+ exit(-1)
305
+ return use_path
@@ -0,0 +1,27 @@
1
+ from dataclasses import dataclass
2
+ from typing import NamedTuple, Optional
3
+
4
+
5
+ @dataclass
6
+ class Request:
7
+ __slots__ = ("frame_num", "header", "file_data", "full_uri", "time_epoch")
8
+ frame_num: int
9
+ header: bytes
10
+ file_data: bytes
11
+ full_uri: str
12
+ time_epoch: float
13
+
14
+
15
+ @dataclass
16
+ class Response:
17
+ __slots__ = ("frame_num", "header", "file_data", "time_epoch", "_request_in")
18
+ frame_num: int
19
+ header: bytes
20
+ file_data: bytes
21
+ time_epoch: float
22
+ _request_in: Optional[int]
23
+
24
+
25
+ class HttpPair(NamedTuple):
26
+ request: Optional[Request]
27
+ response: Optional[Response]
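For reference, these records are plain slotted dataclasses plus a NamedTuple; a quick construction sketch with illustrative values only:

```python
# Sketch: build an unanswered-request pair by hand.
req = Request(frame_num=1, header=b"GET / HTTP/1.1\r\n\r\n", file_data=b"",
              full_uri="http://example/", time_epoch=0.0)
pair = HttpPair(request=req, response=None)
```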
@@ -0,0 +1,185 @@
1
+ import binascii
2
+ import contextlib
3
+ import gzip
4
+ from typing import List, Optional, Tuple
5
+ from urllib import parse
6
+
7
+ from .logging_config import logger
8
+
9
+
10
+ class PacketParser:
11
+ @staticmethod
12
+ def parse_packet_data(row: list) -> Tuple[int, int, float, str, bytes, bytes]:
13
+ """
14
+ Parse one line of tshark fields output
15
+ row definition (all bytes):
16
+ 0: http.response.code
17
+ 1: http.request_in
18
+ 2: tcp.reassembled.data
19
+ 3: frame.number
20
+ 4: tcp.payload
21
+ 5: frame.time_epoch
22
+ 6: exported_pdu.exported_pdu
23
+ 7: http.request.full_uri
24
+ 8: http.file_data
25
+ 9: tcp.segment.count
26
+ """
27
+ frame_num = int(row[3])
28
+ request_in = int(row[1]) if row[1] else frame_num
29
+ # Decode only URI to string
30
+ full_uri = parse.unquote(row[7].decode("utf-8", errors="replace")) if row[7] else ""
31
+ time_epoch = float(row[5])
32
+ http_file_data = row[8] if len(row) > 8 else b""
33
+
34
+ # Logic for Raw Packet (Header Source)
35
+ is_reassembled = len(row) > 9 and row[9]
36
+
37
+ if is_reassembled and row[2]:
38
+ full_request = row[2]
39
+ elif row[4]:
40
+ full_request = row[4]
41
+ else:
42
+ # Fallback (e.g. Exported PDU)
43
+ full_request = row[2] if row[2] else (row[6] if row[6] else b"")
44
+
45
+ return frame_num, request_in, time_epoch, full_uri, full_request, http_file_data
46
+
47
+ @staticmethod
48
+ def split_http_headers(file_data: bytes) -> Tuple[bytes, bytes]:
49
+ headerEnd = file_data.find(b"\r\n\r\n")
50
+ if headerEnd != -1:
51
+ return file_data[: headerEnd + 4], file_data[headerEnd + 4 :]
52
+ elif file_data.find(b"\n\n") != -1:
53
+ headerEnd = file_data.index(b"\n\n") + 2
54
+ return file_data[:headerEnd], file_data[headerEnd:]
55
+ return b"", file_data
56
+
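For example, `split_http_headers` on a small message behaves roughly like this (import path follows this package's layout):

```python
from FlowAnalyzer.PacketParser import PacketParser

# Sketch: header/body split on a tiny HTTP message.
msg = b"GET / HTTP/1.1\r\nHost: example\r\n\r\nhello"
header, body = PacketParser.split_http_headers(msg)
# header == b"GET / HTTP/1.1\r\nHost: example\r\n\r\n", body == b"hello"
```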
57
+ @staticmethod
58
+ def dechunk_http_response(file_data: bytes) -> bytes:
59
+ """解码分块TCP数据"""
60
+ if not file_data:
61
+ return b""
62
+
63
+ chunks = []
64
+ cursor = 0
65
+ total_len = len(file_data)
66
+
67
+ while cursor < total_len:
68
+ newline_idx = file_data.find(b"\n", cursor)
69
+ if newline_idx == -1:
70
+ raise ValueError("Not chunked data")
71
+
72
+ size_line = file_data[cursor:newline_idx].strip()
73
+ if not size_line:
74
+ cursor = newline_idx + 1
75
+ continue
76
+
77
+ chunk_size = int(size_line, 16)
78
+ if chunk_size == 0:
79
+ break
80
+
81
+ data_start = newline_idx + 1
82
+ data_end = data_start + chunk_size
83
+
84
+ if data_end > total_len:
85
+ chunks.append(file_data[data_start:])
86
+ break
87
+
88
+ chunks.append(file_data[data_start:data_end])
89
+
90
+ cursor = data_end
91
+ while cursor < total_len and file_data[cursor] in (13, 10):
92
+ cursor += 1
93
+
94
+ return b"".join(chunks)
95
+
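A tiny worked example of the dechunking loop (chunk sizes are hex, terminated by a zero-length chunk):

```python
from FlowAnalyzer.PacketParser import PacketParser

# Sketch: decode a small chunked body.
chunked = b"5\r\nhello\r\n6\r\n world\r\n0\r\n\r\n"
assert PacketParser.dechunk_http_response(chunked) == b"hello world"
```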
96
+ @staticmethod
97
+ def extract_http_file_data(full_request: bytes, http_file_data: bytes) -> Tuple[bytes, bytes]:
98
+ """
99
+ Extract the file data from an HTTP request or response (hybrid mode, binary-optimized)
100
+ """
101
+ header = b""
102
+ file_data = b""
103
+
104
+ try:
105
+ # --- 1. Extract the header ---
106
+ if full_request:
107
+ raw_bytes = binascii.unhexlify(full_request)
108
+ h_part, _ = PacketParser.split_http_headers(raw_bytes)
109
+ header = h_part
110
+
111
+ # --- 2. Extract the body ---
112
+ if http_file_data:
113
+ try:
114
+ file_data = binascii.unhexlify(http_file_data)
115
+ return header, file_data
116
+ except binascii.Error:
117
+ logger.warning("解析 http.file_data Hex 失败,尝试回退到原始方式")
118
+
119
+ # --- 3. Fallback mode ---
120
+ if full_request and not file_data:
121
+ raw_bytes = binascii.unhexlify(full_request)
122
+ _, body_part = PacketParser.split_http_headers(raw_bytes)
123
+
124
+ with contextlib.suppress(Exception):
125
+ body_part = PacketParser.dechunk_http_response(body_part)
126
+
127
+ with contextlib.suppress(Exception):
128
+ if body_part.startswith(b"\x1f\x8b"):
129
+ body_part = gzip.decompress(body_part)
130
+
131
+ file_data = body_part
132
+ return header, file_data
133
+
134
+ except ValueError as e:
135
+ logger.error(f"Hex转换失败: {str(e)[:100]}...")
136
+ return b"", b""
137
+ except Exception as e:
138
+ logger.error(f"解析HTTP数据未知错误: {e}")
139
+ return b"", b""
140
+
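A rough example of the hybrid extraction, with hex input shaped like what the tshark fields produce (values are fabricated for illustration):

```python
import binascii
from FlowAnalyzer.PacketParser import PacketParser

# Sketch: hex-encoded reassembled response plus the http.file_data field.
raw = b"HTTP/1.1 200 OK\r\nContent-Length: 5\r\n\r\nhello"
header, body = PacketParser.extract_http_file_data(
    binascii.hexlify(raw),       # full_request (tcp.reassembled.data / tcp.payload)
    binascii.hexlify(b"hello"),  # http.file_data
)
# header ends with b"\r\n\r\n"; body == b"hello"
```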
141
+ @staticmethod
142
+ def process_row(line: bytes) -> Optional[dict]:
143
+ """
144
+ Process a single line and return a structured result for the main thread to write
145
+ """
146
+ line = line.rstrip(b"\r\n")
147
+ if not line:
148
+ return None
149
+
150
+ row = line.split(b"\t")
151
+ try:
152
+ frame_num, request_in, time_epoch, full_uri, full_request, http_file_data = PacketParser.parse_packet_data(row)
153
+
154
+ if not full_request and not http_file_data:
155
+ return None
156
+
157
+ header, file_data = PacketParser.extract_http_file_data(full_request, http_file_data)
158
+
159
+ # row[0] is http.response.code (bytes)
160
+ is_response = bool(row[0])
161
+
162
+ return {
163
+ "type": "response" if is_response else "request",
164
+ "frame_num": frame_num,
165
+ "header": header,
166
+ "file_data": file_data,
167
+ "time_epoch": time_epoch,
168
+ "request_in": request_in, # Only useful for Response
169
+ "full_uri": full_uri, # Only useful for Request
170
+ }
171
+
172
+ except Exception:
173
+ return None
174
+
175
+ @staticmethod
176
+ def process_batch(lines: List[bytes]) -> List[dict]:
177
+ """
178
+ Process lines in batches to cut per-call overhead
179
+ """
180
+ results = []
181
+ for line in lines:
182
+ res = PacketParser.process_row(line)
183
+ if res:
184
+ results.append(res)
185
+ return results
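To exercise the row pipeline in isolation, one can feed a fabricated tshark `-T fields` line through `process_batch`, using the same 10-field tab-separated layout the `_stream_tshark_to_db` command requests (the frame number, URI and timestamp below are made up):

```python
import binascii
from FlowAnalyzer.PacketParser import PacketParser

# Field order: response.code, request_in, reassembled, frame.number, payload,
# time_epoch, exported_pdu, full_uri, file_data, segment.count.
payload = binascii.hexlify(b"GET /flag HTTP/1.1\r\nHost: x\r\n\r\n")
line = b"\t".join([b"", b"", b"", b"3", payload, b"1700000000.0",
                   b"", b"http://x/flag", b"", b""]) + b"\n"

results = PacketParser.process_batch([line])
# results[0]["type"] == "request", results[0]["frame_num"] == 3
```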
@@ -15,8 +15,9 @@ def configure_logger(logger_name, level=logging.DEBUG) -> logging.Logger:
15
15
  console_handler.setFormatter(formatter)
16
16
  return logger
17
17
 
18
+
18
19
  logger = configure_logger("FlowAnalyzer", logging.INFO)
19
20
 
20
- if __name__ == '__main__':
21
+ if __name__ == "__main__":
21
22
  logger = configure_logger("FlowAnalyzer")
22
23
  logger.info("This is a test!")
@@ -1,6 +1,6 @@
1
- Metadata-Version: 2.1
1
+ Metadata-Version: 2.4
2
2
  Name: FlowAnalyzer
3
- Version: 0.4.3
3
+ Version: 0.4.4
4
4
  Summary: FlowAnalyzer是一个流量分析器,用于解析和处理tshark导出的JSON数据文件
5
5
  Home-page: https://github.com/Byxs20/FlowAnalyzer
6
6
  Author: Byxs20
@@ -15,6 +15,14 @@ Classifier: Programming Language :: Python :: 3.8
15
15
  Classifier: Programming Language :: Python :: 3.9
16
16
  Description-Content-Type: text/markdown
17
17
  License-File: LICENSE
18
+ Dynamic: author
19
+ Dynamic: author-email
20
+ Dynamic: classifier
21
+ Dynamic: description
22
+ Dynamic: description-content-type
23
+ Dynamic: home-page
24
+ Dynamic: license-file
25
+ Dynamic: summary
18
26
 
19
27
  # FlowAnalyzer
20
28
 
@@ -28,9 +36,11 @@ License-File: LICENSE
28
36
 
29
37
  为了解决传统解析方式慢、内存占用高的问题,FlowAnalyzer 进行了核心架构升级:**流式解析 + SQLite 智能缓存**。
30
38
 
31
- ### 1. ⚡️ High-performance streaming parsing
32
- - **Very low memory usage**: the entire JSON is no longer read into memory; Tshark's output is consumed over a `subprocess` pipe and parsed incrementally with `ijson`.
33
- - **No intermediate files**: no huge temporary JSON file is generated during parsing; data goes straight into the database.
39
+ ### 1. ⚡️ High-performance streaming parsing (multithreaded pipeline)
40
+ - **Multithreaded parallelism**: a pipeline built on `ThreadPoolExecutor`; the main thread reads the Tshark output while worker threads parse packets in parallel, making full use of multi-core CPUs.
41
+ - **Batch processing**: a batching mechanism (2000 packets per batch by default) sharply reduces database transaction overhead and Python call overhead.
42
+ - **Memory backpressure control**: the length of the pending-work queue is monitored to prevent memory exhaustion on high-volume traffic.
43
+ - **Very low memory usage**: the entire JSON is no longer read into memory; data is streamed through a `subprocess` pipe, and no huge temporary files are produced during parsing.
34
44
 
35
45
  ### 2. 💾 智能缓存机制
36
46
  - **自动缓存**:首次分析 `test.pcap` 时,会自动生成同级目录下的 `test.db`。
@@ -51,7 +61,7 @@ License-File: LICENSE
51
61
 
52
62
  | 特性 | 旧版架构 | **新版架构 (FlowAnalyzer)** |
53
63
  | :----------- | :---------------------------- | :---------------------------------- |
54
- | **解析流程** | 生成巨大 JSON -> 全量读入内存 | Tshark流 -> 管道 -> ijson -> SQLite |
64
+ | **解析流程** | 生成巨大 JSON -> 全量读入内存 | Tshark流 -> 多线程Batch解析 -> SQLite |
55
65
  | **内存占用** | 极高 (易 OOM) | **极低 (内存稳定)** |
56
66
  | **二次加载** | 需重新解析 | **直接读取 DB (0秒)** |
57
67
  | **磁盘占用** | 巨大的临时 JSON 文件 | 轻量级 SQLite 文件 |
@@ -63,11 +73,11 @@ License-File: LICENSE
63
73
  请确保您的环境中已安装 Python 3 和 Tshark (Wireshark)。
64
74
 
65
75
  ```bash
66
- # 安装 FlowAnalyzer 及其依赖 ijson
67
- pip3 install FlowAnalyzer ijson
76
+ # 安装 FlowAnalyzer
77
+ pip3 install FlowAnalyzer
68
78
 
69
79
  # 或者使用国内源加速
70
- pip3 install FlowAnalyzer ijson -i https://pypi.org/simple
80
+ pip3 install FlowAnalyzer -i https://pypi.org/simple
71
81
  ```
72
82
 
73
83
  ---
@@ -2,6 +2,8 @@ LICENSE
2
2
  README.md
3
3
  setup.py
4
4
  FlowAnalyzer/FlowAnalyzer.py
5
+ FlowAnalyzer/Models.py
6
+ FlowAnalyzer/PacketParser.py
5
7
  FlowAnalyzer/Path.py
6
8
  FlowAnalyzer/__init__.py
7
9
  FlowAnalyzer/logging_config.py
@@ -1,6 +1,6 @@
1
- Metadata-Version: 2.1
1
+ Metadata-Version: 2.4
2
2
  Name: FlowAnalyzer
3
- Version: 0.4.3
3
+ Version: 0.4.4
4
4
  Summary: FlowAnalyzer是一个流量分析器,用于解析和处理tshark导出的JSON数据文件
5
5
  Home-page: https://github.com/Byxs20/FlowAnalyzer
6
6
  Author: Byxs20
@@ -15,6 +15,14 @@ Classifier: Programming Language :: Python :: 3.8
15
15
  Classifier: Programming Language :: Python :: 3.9
16
16
  Description-Content-Type: text/markdown
17
17
  License-File: LICENSE
18
+ Dynamic: author
19
+ Dynamic: author-email
20
+ Dynamic: classifier
21
+ Dynamic: description
22
+ Dynamic: description-content-type
23
+ Dynamic: home-page
24
+ Dynamic: license-file
25
+ Dynamic: summary
18
26
 
19
27
  # FlowAnalyzer
20
28
 
@@ -28,9 +36,11 @@ License-File: LICENSE
28
36
 
29
37
  为了解决传统解析方式慢、内存占用高的问题,FlowAnalyzer 进行了核心架构升级:**流式解析 + SQLite 智能缓存**。
30
38
 
31
- ### 1. ⚡️ High-performance streaming parsing
32
- - **Very low memory usage**: the entire JSON is no longer read into memory; Tshark's output is consumed over a `subprocess` pipe and parsed incrementally with `ijson`.
33
- - **No intermediate files**: no huge temporary JSON file is generated during parsing; data goes straight into the database.
39
+ ### 1. ⚡️ High-performance streaming parsing (multithreaded pipeline)
40
+ - **Multithreaded parallelism**: a pipeline built on `ThreadPoolExecutor`; the main thread reads the Tshark output while worker threads parse packets in parallel, making full use of multi-core CPUs.
41
+ - **Batch processing**: a batching mechanism (2000 packets per batch by default) sharply reduces database transaction overhead and Python call overhead.
42
+ - **Memory backpressure control**: the length of the pending-work queue is monitored to prevent memory exhaustion on high-volume traffic.
43
+ - **Very low memory usage**: the entire JSON is no longer read into memory; data is streamed through a `subprocess` pipe, and no huge temporary files are produced during parsing.
34
44
 
35
45
  ### 2. 💾 智能缓存机制
36
46
  - **自动缓存**:首次分析 `test.pcap` 时,会自动生成同级目录下的 `test.db`。
@@ -51,7 +61,7 @@ License-File: LICENSE
51
61
 
52
62
  | 特性 | 旧版架构 | **新版架构 (FlowAnalyzer)** |
53
63
  | :----------- | :---------------------------- | :---------------------------------- |
54
- | **解析流程** | 生成巨大 JSON -> 全量读入内存 | Tshark流 -> 管道 -> ijson -> SQLite |
64
+ | **解析流程** | 生成巨大 JSON -> 全量读入内存 | Tshark流 -> 多线程Batch解析 -> SQLite |
55
65
  | **内存占用** | 极高 (易 OOM) | **极低 (内存稳定)** |
56
66
  | **二次加载** | 需重新解析 | **直接读取 DB (0秒)** |
57
67
  | **磁盘占用** | 巨大的临时 JSON 文件 | 轻量级 SQLite 文件 |
@@ -63,11 +73,11 @@ License-File: LICENSE
63
73
  请确保您的环境中已安装 Python 3 和 Tshark (Wireshark)。
64
74
 
65
75
  ```bash
66
- # 安装 FlowAnalyzer 及其依赖 ijson
67
- pip3 install FlowAnalyzer ijson
76
+ # 安装 FlowAnalyzer
77
+ pip3 install FlowAnalyzer
68
78
 
69
79
  # 或者使用国内源加速
70
- pip3 install FlowAnalyzer ijson -i https://pypi.org/simple
80
+ pip3 install FlowAnalyzer -i https://pypi.org/simple
71
81
  ```
72
82
 
73
83
  ---
@@ -10,9 +10,11 @@
10
10
 
11
11
  为了解决传统解析方式慢、内存占用高的问题,FlowAnalyzer 进行了核心架构升级:**流式解析 + SQLite 智能缓存**。
12
12
 
13
- ### 1. ⚡️ High-performance streaming parsing
14
- - **Very low memory usage**: the entire JSON is no longer read into memory; Tshark's output is consumed over a `subprocess` pipe and parsed incrementally with `ijson`.
15
- - **No intermediate files**: no huge temporary JSON file is generated during parsing; data goes straight into the database.
13
+ ### 1. ⚡️ High-performance streaming parsing (multithreaded pipeline)
14
+ - **Multithreaded parallelism**: a pipeline built on `ThreadPoolExecutor`; the main thread reads the Tshark output while worker threads parse packets in parallel, making full use of multi-core CPUs.
15
+ - **Batch processing**: a batching mechanism (2000 packets per batch by default) sharply reduces database transaction overhead and Python call overhead.
16
+ - **Memory backpressure control**: the length of the pending-work queue is monitored to prevent memory exhaustion on high-volume traffic.
17
+ - **Very low memory usage**: the entire JSON is no longer read into memory; data is streamed through a `subprocess` pipe, and no huge temporary files are produced during parsing.
16
18
 
17
19
  ### 2. 💾 智能缓存机制
18
20
  - **自动缓存**:首次分析 `test.pcap` 时,会自动生成同级目录下的 `test.db`。
@@ -33,7 +35,7 @@
33
35
 
34
36
  | 特性 | 旧版架构 | **新版架构 (FlowAnalyzer)** |
35
37
  | :----------- | :---------------------------- | :---------------------------------- |
36
- | **解析流程** | 生成巨大 JSON -> 全量读入内存 | Tshark流 -> 管道 -> ijson -> SQLite |
38
+ | **解析流程** | 生成巨大 JSON -> 全量读入内存 | Tshark流 -> 多线程Batch解析 -> SQLite |
37
39
  | **内存占用** | 极高 (易 OOM) | **极低 (内存稳定)** |
38
40
  | **二次加载** | 需重新解析 | **直接读取 DB (0秒)** |
39
41
  | **磁盘占用** | 巨大的临时 JSON 文件 | 轻量级 SQLite 文件 |
@@ -45,11 +47,11 @@
45
47
  请确保您的环境中已安装 Python 3 和 Tshark (Wireshark)。
46
48
 
47
49
  ```bash
48
- # 安装 FlowAnalyzer 及其依赖 ijson
49
- pip3 install FlowAnalyzer ijson
50
+ # 安装 FlowAnalyzer
51
+ pip3 install FlowAnalyzer
50
52
 
51
53
  # 或者使用国内源加速
52
- pip3 install FlowAnalyzer ijson -i https://pypi.org/simple
54
+ pip3 install FlowAnalyzer -i https://pypi.org/simple
53
55
  ```
54
56
 
55
57
  ---
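To make the smart-cache behaviour described in the README concrete, a hedged timing sketch (file name and filter are placeholders; the class is imported via its module path in this package):

```python
import time
from FlowAnalyzer.FlowAnalyzer import FlowAnalyzer

t0 = time.perf_counter()
db = FlowAnalyzer.get_json_data("test.pcap", display_filter="http")   # first run: tshark parse
t1 = time.perf_counter()
FlowAnalyzer.get_json_data("test.pcap", display_filter="http")        # second run: cache hit
t2 = time.perf_counter()
print(f"parse: {t1 - t0:.2f}s, cached: {t2 - t1:.4f}s")
```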
@@ -7,7 +7,7 @@ with open(os.path.join(os.path.dirname(__file__), "README.md"), encoding="utf-8"
7
7
 
8
8
  setup(
9
9
  name="FlowAnalyzer",
10
- version="0.4.3",
10
+ version="0.4.4",
11
11
  description="FlowAnalyzer是一个流量分析器,用于解析和处理tshark导出的JSON数据文件",
12
12
  author="Byxs20",
13
13
  author_email="97766819@qq.com",
@@ -1,481 +0,0 @@
1
- import contextlib
2
- import csv
3
- import gzip
4
- import os
5
- import sqlite3
6
- import subprocess
7
- from dataclasses import dataclass
8
- from typing import Iterable, NamedTuple, Optional, Tuple
9
- from urllib import parse
10
-
11
- from .logging_config import logger
12
- from .Path import get_default_tshark_path
13
-
14
-
15
- @dataclass
16
- class Request:
17
- __slots__ = ("frame_num", "header", "file_data", "full_uri", "time_epoch")
18
- frame_num: int
19
- header: bytes
20
- file_data: bytes
21
- full_uri: str
22
- time_epoch: float
23
-
24
-
25
- @dataclass
26
- class Response:
27
- __slots__ = ("frame_num", "header", "file_data", "time_epoch", "_request_in")
28
- frame_num: int
29
- header: bytes
30
- file_data: bytes
31
- time_epoch: float
32
- _request_in: Optional[int]
33
-
34
-
35
- class HttpPair(NamedTuple):
36
- request: Optional[Request]
37
- response: Optional[Response]
38
-
39
-
40
- class FlowAnalyzer:
41
- """
42
- FlowAnalyzer 流量分析器 (智能缓存版)
43
- 特点:
44
- 1. Tshark -> Pipe -> ijson -> SQLite (无中间JSON文件)
45
- 2. 智能校验:自动比对 Filter 和文件修改时间,防止缓存错乱
46
- 3. 存储优化:数据库文件生成在流量包同级目录下
47
- """
48
-
49
- def __init__(self, db_path: str):
50
- """
51
- 初始化 FlowAnalyzer
52
- :param db_path: 数据库文件路径 (由 get_json_data 返回)
53
- """
54
- # 路径兼容处理
55
- if db_path.endswith(".json"):
56
- possible_db = db_path + ".db"
57
- if os.path.exists(possible_db):
58
- self.db_path = possible_db
59
- else:
60
- self.db_path = db_path
61
- else:
62
- self.db_path = db_path
63
-
64
- self.check_db_file()
65
-
66
- def check_db_file(self):
67
- """检查数据库文件是否存在"""
68
- if not os.path.exists(self.db_path):
69
- raise FileNotFoundError(f"未找到数据文件或缓存数据库: {self.db_path},请先调用 get_json_data 生成。")
70
-
71
- def generate_http_dict_pairs(self) -> Iterable[HttpPair]:
72
- """生成HTTP请求和响应信息的字典对 (SQL JOIN 高性能版)"""
73
- if not os.path.exists(self.db_path):
74
- return
75
-
76
- with sqlite3.connect(self.db_path) as conn:
77
- cursor = conn.cursor()
78
- # 开启查询优化
79
- cursor.execute("PRAGMA query_only = 1;")
80
-
81
- # === 第一步:配对查询 ===
82
- # 利用 SQLite 的 LEFT JOIN 直接匹配请求和响应
83
- # 避免将所有数据加载到 Python 内存中
84
- sql_pair = """
85
- SELECT
86
- req.frame_num, req.header, req.file_data, req.full_uri, req.time_epoch, -- 0-4 (Request)
87
- resp.frame_num, resp.header, resp.file_data, resp.time_epoch, resp.request_in -- 5-9 (Response)
88
- FROM requests req
89
- LEFT JOIN responses resp ON req.frame_num = resp.request_in
90
- ORDER BY req.frame_num ASC
91
- """
92
-
93
- cursor.execute(sql_pair)
94
-
95
- # 流式遍历结果,内存占用极低
96
- for row in cursor:
97
- # 构建 Request 对象
98
- # 注意处理 NULL 情况,虽然 requests 表理论上不为空,但防万一用 or b''
99
- req = Request(frame_num=row[0], header=row[1] or b"", file_data=row[2] or b"", full_uri=row[3] or "", time_epoch=row[4])
100
-
101
- resp = None
102
- # 如果 row[5] (Response frame_num) 不为空,说明匹配到了响应
103
- if row[5] is not None:
104
- resp = Response(frame_num=row[5], header=row[6] or b"", file_data=row[7] or b"", time_epoch=row[8], _request_in=row[9])
105
-
106
- yield HttpPair(request=req, response=resp)
107
-
108
- # === 第二步:孤儿响应查询 ===
109
- # 找出那些有 request_in 但找不到对应 Request 的响应包
110
- sql_orphan = """
111
- SELECT frame_num, header, file_data, time_epoch, request_in
112
- FROM responses
113
- WHERE request_in NOT IN (SELECT frame_num FROM requests)
114
- """
115
- cursor.execute(sql_orphan)
116
-
117
- for row in cursor:
118
- resp = Response(frame_num=row[0], header=row[1] or b"", file_data=row[2] or b"", time_epoch=row[3], _request_in=row[4])
119
- yield HttpPair(request=None, response=resp)
120
-
121
- # =========================================================================
122
- # 静态方法区域:包含校验逻辑和流式处理
123
- # =========================================================================
124
-
125
- @staticmethod
126
- def get_json_data(file_path: str, display_filter: str, tshark_path: Optional[str] = None) -> str:
127
- """
128
- 获取数据路径 (智能校验版)。
129
-
130
- 逻辑:
131
- 1. 根据 PCAP 路径推算 DB 路径 (位于 PCAP 同级目录)。
132
- 2. 检查 DB 是否存在。
133
- 3. 检查 Filter 和文件元数据是否一致。
134
- 4. 若一致返回路径,不一致则重新解析。
135
- """
136
- if not os.path.exists(file_path):
137
- raise FileNotFoundError("流量包路径不存在:%s" % file_path)
138
-
139
- # --- 修改处:获取流量包的绝对路径和所在目录 ---
140
- abs_file_path = os.path.abspath(file_path)
141
- pcap_dir = os.path.dirname(abs_file_path) # 获取文件所在的文件夹
142
- base_name = os.path.splitext(os.path.basename(abs_file_path))[0]
143
-
144
- # 将 db_path 拼接在流量包所在的目录下
145
- db_path = os.path.join(pcap_dir, f"{base_name}.db")
146
- # ----------------------------------------
147
-
148
- # --- 校验环节 ---
149
- if FlowAnalyzer._is_cache_valid(db_path, abs_file_path, display_filter):
150
- logger.debug(f"缓存校验通过 (Filter匹配且文件未变),使用缓存: [{db_path}]")
151
- return db_path
152
- else:
153
- logger.debug(f"缓存失效或不存在 (Filter变更或文件更新),开始重新解析...")
154
-
155
- # --- 解析环节 ---
156
- tshark_path = FlowAnalyzer.get_tshark_path(tshark_path)
157
- FlowAnalyzer._stream_tshark_to_db(abs_file_path, display_filter, tshark_path, db_path)
158
-
159
- return db_path
160
-
161
- @staticmethod
162
- def get_db_data(file_path: str, display_filter: str, tshark_path: Optional[str] = None) -> str:
163
- """
164
- 获取数据库路径 (get_json_data 的语义化别名)。
165
- 新项目建议使用此方法名,get_json_data 保留用于兼容旧习惯。
166
- """
167
- return FlowAnalyzer.get_json_data(file_path, display_filter, tshark_path)
168
-
169
- @staticmethod
170
- def _is_cache_valid(db_path: str, pcap_path: str, current_filter: str) -> bool:
171
- """
172
- 检查缓存有效性:对比 Filter 字符串和文件元数据
173
- """
174
- if not os.path.exists(db_path) or os.path.getsize(db_path) == 0:
175
- return False
176
-
177
- try:
178
- current_mtime = os.path.getmtime(pcap_path)
179
- current_size = os.path.getsize(pcap_path)
180
-
181
- with sqlite3.connect(db_path) as conn:
182
- cursor = conn.cursor()
183
- cursor.execute("SELECT filter, pcap_mtime, pcap_size FROM meta_info LIMIT 1")
184
- row = cursor.fetchone()
185
-
186
- if not row:
187
- return False
188
-
189
- cached_filter, cached_mtime, cached_size = row
190
-
191
- # 容差 0.1秒
192
- if cached_filter == current_filter and cached_size == current_size and abs(cached_mtime - current_mtime) < 0.1:
193
- return True
194
- else:
195
- logger.debug(f"校验失败: 缓存Filter={cached_filter} vs 当前={current_filter}")
196
- return False
197
-
198
- except sqlite3.OperationalError:
199
- return False
200
- except Exception as e:
201
- logger.warning(f"缓存校验出错: {e},将重新解析")
202
- return False
203
-
204
- @staticmethod
205
- def _stream_tshark_to_db(pcap_path: str, display_filter: str, tshark_path: str, db_path: str):
206
- """流式解析并存入DB,同时记录元数据"""
207
- # 增加 CSV 字段大小限制,防止超大包报错
208
- # 将限制设置为系统最大值,注意 32位系统不要超过 2GB (但 Python int通常是动态的,保险起见设大一点)
209
- # Windows下 sys.maxsize 通常足够大
210
- try:
211
- csv.field_size_limit(500 * 1024 * 1024) # 500 MB
212
- except Exception:
213
- # 如果失败,尝试取最大值
214
- csv.field_size_limit(int(2**31 - 1))
215
-
216
- if os.path.exists(db_path):
217
- os.remove(db_path)
218
-
219
- with sqlite3.connect(db_path) as conn:
220
- cursor = conn.cursor()
221
- cursor.execute("PRAGMA synchronous = OFF")
222
- cursor.execute("PRAGMA journal_mode = MEMORY")
223
-
224
- cursor.execute("CREATE TABLE requests (frame_num INTEGER PRIMARY KEY, header BLOB, file_data BLOB, full_uri TEXT, time_epoch REAL)")
225
- cursor.execute("CREATE TABLE responses (frame_num INTEGER PRIMARY KEY, header BLOB, file_data BLOB, time_epoch REAL, request_in INTEGER)")
226
-
227
- cursor.execute("""
228
- CREATE TABLE meta_info (
229
- id INTEGER PRIMARY KEY,
230
- filter TEXT,
231
- pcap_path TEXT,
232
- pcap_mtime REAL,
233
- pcap_size INTEGER
234
- )
235
- """)
236
- conn.commit()
237
-
238
- # 修改命令为 -T fields 模式
239
- command = [
240
- tshark_path,
241
- "-r",
242
- pcap_path,
243
- "-Y",
244
- f"({display_filter})",
245
- "-T",
246
- "fields",
247
- # 指定输出字段
248
- "-e",
249
- "http.response.code", # 0
250
- "-e",
251
- "http.request_in", # 1
252
- "-e",
253
- "tcp.reassembled.data", # 2
254
- "-e",
255
- "frame.number", # 3
256
- "-e",
257
- "tcp.payload", # 4
258
- "-e",
259
- "frame.time_epoch", # 5
260
- "-e",
261
- "exported_pdu.exported_pdu", # 6
262
- "-e",
263
- "http.request.full_uri", # 7
264
- # 格式控制
265
- "-E",
266
- "header=n", # 不输出表头
267
- "-E",
268
- "separator=|", # 使用 | 分割 (比逗号更安全)
269
- "-E",
270
- "quote=d", # 双引号包裹
271
- "-E",
272
- "occurrence=f", # 每个字段只取第一个值 (First)
273
- ]
274
-
275
- logger.debug(f"执行 Tshark: {command}")
276
-
277
- # 使用 utf-8 编码读取 stdout text mode
278
- process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=os.path.dirname(os.path.abspath(pcap_path)), encoding="utf-8", errors="replace")
279
-
280
- db_req_rows = []
281
- db_resp_rows = []
282
- BATCH_SIZE = 5000
283
-
284
- try:
285
- # 使用 csv.reader 解析 stdout 流
286
- reader = csv.reader(process.stdout, delimiter="|", quotechar='"') # type: ignore
287
- with sqlite3.connect(db_path) as conn:
288
- cursor = conn.cursor()
289
-
290
- for row in reader:
291
- # row 是一个列表,对应上面的 -e 顺序
292
- # [code, req_in, reassembled, frame, payload, epoch, pdu, uri]
293
- if not row:
294
- continue
295
-
296
- try:
297
- # 解析数据
298
- frame_num, request_in, time_epoch, full_uri, full_request = FlowAnalyzer.parse_packet_data(row)
299
-
300
- if not full_request:
301
- continue
302
-
303
- header, file_data = FlowAnalyzer.extract_http_file_data(full_request)
304
-
305
- # 判断是请求还是响应
306
- # http.response.code (index 0) 是否为空
307
- if row[0]:
308
- # Response
309
- db_resp_rows.append((frame_num, header, file_data, time_epoch, request_in))
310
- else:
311
- # Request
312
- db_req_rows.append((frame_num, header, file_data, full_uri, time_epoch))
313
-
314
- # 批量插入
315
- if len(db_req_rows) >= BATCH_SIZE:
316
- cursor.executemany("INSERT OR REPLACE INTO requests VALUES (?,?,?,?,?)", db_req_rows)
317
- db_req_rows.clear()
318
- if len(db_resp_rows) >= BATCH_SIZE:
319
- cursor.executemany("INSERT OR REPLACE INTO responses VALUES (?,?,?,?,?)", db_resp_rows)
320
- db_resp_rows.clear()
321
-
322
- except Exception as e:
323
- # 偶尔可能会有解析失败的行,跳过即可
324
- pass
325
-
326
- # 插入剩余数据
327
- if db_req_rows:
328
- cursor.executemany("INSERT OR REPLACE INTO requests VALUES (?,?,?,?,?)", db_req_rows)
329
- if db_resp_rows:
330
- cursor.executemany("INSERT OR REPLACE INTO responses VALUES (?,?,?,?,?)", db_resp_rows)
331
-
332
- # --- 优化点:插入完数据后再创建索引,速度更快 ---
333
- cursor.execute("CREATE INDEX idx_resp_req_in ON responses(request_in)")
334
-
335
- pcap_mtime = os.path.getmtime(pcap_path)
336
- pcap_size = os.path.getsize(pcap_path)
337
- cursor.execute("INSERT INTO meta_info (filter, pcap_path, pcap_mtime, pcap_size) VALUES (?, ?, ?, ?)", (display_filter, pcap_path, pcap_mtime, pcap_size))
338
-
339
- conn.commit()
340
-
341
- except Exception as e:
342
- logger.error(f"解析错误: {e}")
343
- if process.poll() is None:
344
- process.terminate()
345
- finally:
346
- if process.poll() is None:
347
- process.terminate()
348
-
349
- # --- 辅助静态方法 ---
350
-
351
- @staticmethod
352
- def parse_packet_data(row: list) -> Tuple[int, int, float, str, str]:
353
- # row definition:
354
- # 0: http.response.code
355
- # 1: http.request_in
356
- # 2: tcp.reassembled.data
357
- # 3: frame.number
358
- # 4: tcp.payload
359
- # 5: frame.time_epoch
360
- # 6: exported_pdu.exported_pdu
361
- # 7: http.request.full_uri
362
-
363
- frame_num = int(row[3])
364
- request_in = int(row[1]) if row[1] else frame_num
365
- full_uri = parse.unquote(row[7]) if row[7] else ""
366
- time_epoch = float(row[5])
367
-
368
- if row[2]:
369
- full_request = row[2]
370
- elif row[4]:
371
- full_request = row[4]
372
- else:
373
- full_request = row[6] if row[6] else ""
374
-
375
- return frame_num, request_in, time_epoch, full_uri, full_request
376
-
377
- @staticmethod
378
- def split_http_headers(file_data: bytes) -> Tuple[bytes, bytes]:
379
- headerEnd = file_data.find(b"\r\n\r\n")
380
- if headerEnd != -1:
381
- return file_data[: headerEnd + 4], file_data[headerEnd + 4 :]
382
- elif file_data.find(b"\n\n") != -1:
383
- headerEnd = file_data.index(b"\n\n") + 2
384
- return file_data[:headerEnd], file_data[headerEnd:]
385
- return b"", file_data
386
-
387
- @staticmethod
388
- def dechunck_http_response(file_data: bytes) -> bytes:
389
- """解码分块TCP数据"""
390
- if not file_data:
391
- return b""
392
-
393
- chunks = []
394
- cursor = 0
395
- total_len = len(file_data)
396
-
397
- while cursor < total_len:
398
- # 1. 寻找当前 Chunk Size 行的结束符 (\n)
399
- newline_idx = file_data.find(b"\n", cursor)
400
- if newline_idx == -1:
401
- # 找不到换行符,说明格式不对,抛出异常让外层处理
402
- raise ValueError("Not chunked data")
403
-
404
- # 2. 提取并解析十六进制大小
405
- size_line = file_data[cursor:newline_idx].strip()
406
-
407
- # 处理可能的空行 (例如上一个 Chunk 后的 CRLF)
408
- if not size_line:
409
- cursor = newline_idx + 1
410
- continue
411
-
412
- # 这里不要捕获 ValueError,如果解析失败,直接抛出
413
- # 说明这根本不是 chunk size,而是普通数据
414
- chunk_size = int(size_line, 16)
415
-
416
- # Chunk Size 为 0 表示传输结束
417
- if chunk_size == 0:
418
- break
419
-
420
- # 3. 定位数据区域
421
- data_start = newline_idx + 1
422
- data_end = data_start + chunk_size
423
-
424
- if data_end > total_len:
425
- # 数据被截断,尽力读取
426
- chunks.append(file_data[data_start:])
427
- break
428
-
429
- # 4. 提取数据
430
- chunks.append(file_data[data_start:data_end])
431
-
432
- # 5. 移动游标
433
- cursor = data_end
434
- # 跳过尾随的 \r 和 \n
435
- while cursor < total_len and file_data[cursor] in (13, 10):
436
- cursor += 1
437
-
438
- return b"".join(chunks)
439
-
440
- @staticmethod
441
- def extract_http_file_data(full_request: str) -> Tuple[bytes, bytes]:
442
- """提取HTTP请求或响应中的文件数据 (修复版)"""
443
- # 1. 基础校验
444
- if not full_request:
445
- return b"", b""
446
-
447
- try:
448
- # 转为二进制
449
- raw_bytes = bytes.fromhex(full_request)
450
-
451
- # 分割 Header 和 Body
452
- header, file_data = FlowAnalyzer.split_http_headers(raw_bytes)
453
-
454
- # 处理 Chunked 编码
455
- with contextlib.suppress(Exception):
456
- file_data = FlowAnalyzer.dechunck_http_response(file_data)
457
-
458
- # 处理 Gzip 压缩
459
- with contextlib.suppress(Exception):
460
- if file_data.startswith(b"\x1f\x8b"):
461
- file_data = gzip.decompress(file_data)
462
-
463
- return header, file_data
464
-
465
- except ValueError as e:
466
- # 专门捕获 Hex 转换错误,并打印出来,方便你调试
467
- # 如果你在控制台看到这个错误,说明 Tshark 输出的数据格式非常奇怪
468
- logger.error(f"Hex转换失败: {str(e)[:100]}... 原数据片段: {full_request[:50]}")
469
- return b"", b""
470
- except Exception as e:
471
- logger.error(f"解析HTTP数据未知错误: {e}")
472
- return b"", b""
473
-
474
- @staticmethod
475
- def get_tshark_path(tshark_path: Optional[str]) -> str:
476
- default_tshark_path = get_default_tshark_path()
477
- use_path = tshark_path if tshark_path and os.path.exists(tshark_path) else default_tshark_path
478
- if not use_path or not os.path.exists(use_path):
479
- logger.critical("未找到 Tshark,请检查路径配置")
480
- exit(-1)
481
- return use_path
File without changes
File without changes