Jarvis-Brain 0.1.9.15__tar.gz → 0.1.11.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: Jarvis_Brain
- Version: 0.1.9.15
+ Version: 0.1.11.2
  Summary: Jarvis brain mcp
  Requires-Python: >=3.10
  Requires-Dist: beautifulsoup4
@@ -8,3 +8,4 @@ Requires-Dist: curl-cffi
  Requires-Dist: drissionpage
  Requires-Dist: fastmcp
  Requires-Dist: minify-html
+ Requires-Dist: pillow
@@ -1,12 +1,17 @@
+ """
+ The tools provided in this file act as standalone DrissionPage MCP tools
+ """
  import hashlib
  import json
  import os
+ import time
  from typing import Any
 
  from fastmcp import FastMCP
 
  from tools.browser_manager import BrowserManager
- from tools.tools import compress_html, requests_html, dp_headless_html, assert_waf_cookie, dp_mcp_message_pack
+ from tools.tools import compress_html, requests_html, dp_headless_html, assert_waf_cookie, dp_mcp_message_pack, \
+     compress_html_js, compress_image_bytes
  from tools.browser_proxy import DPProxyClient, DPProxyClientManager
 
  html_source_code_local_save_path = os.path.join(os.getcwd(), "html-source-code")
@@ -94,10 +99,11 @@ def register_get_html(mcp: FastMCP, browser_manager):
          file_name_prefix = hashlib.md5(str(tab.title).encode('utf-8')).hexdigest()
          if not os.path.exists(html_source_code_local_save_path):
              os.makedirs(html_source_code_local_save_path)
-         min_html, compress_rate = compress_html(tab.html)
-         html_str_list = [min_html[i:i + one_turn_max_token] for i in range(0, len(min_html), one_turn_max_token)]
+         # min_html, compress_rate = compress_html(tab.html)
+         min_html = tab.run_js(compress_html_js)
+         # html_str_list = [min_html[i:i + one_turn_max_token] for i in range(0, len(min_html), one_turn_max_token)]
          html_file_list = []
-         for index, html_str in enumerate(html_str_list):
+         for index, html_str in enumerate([min_html]):
              file_name = file_name_prefix + f"_{tab_id}_segment{index}.html"
              abs_path = os.path.join(html_source_code_local_save_path, file_name)
              with open(abs_path, "w", encoding="utf-8") as f:
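
This hunk replaces the server-side compress_html pass with in-page DOM simplification: the compress_html_js script (added to tools.tools later in this diff) is injected through DrissionPage's run_js, which returns the script's return value. A minimal standalone sketch of that call, assuming a reachable page; the port and URL are illustrative, not from the package:

    from DrissionPage import ChromiumPage, ChromiumOptions
    from tools.tools import compress_html_js

    opt = ChromiumOptions().set_local_port(9222)  # hypothetical debug port
    tab = ChromiumPage(opt).latest_tab
    tab.get("https://example.com")  # hypothetical URL
    # The script ends with `return getSimplifiedDOM(document.body);`,
    # so run_js hands back the simplified DOM as a single string
    min_html = tab.run_js(compress_html_js)
    print(min_html[:200])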
@@ -299,3 +305,24 @@ def register_scroll_action(mcp: FastMCP, browser_manager):
          browser_port=browser_port,
          tab_id=tab_id,
      )
+
+
+ def register_get_screenshot(mcp: FastMCP, browser_manager):
+     @mcp.tool(name="get_tab_screenshot", description="Attempt to take a screenshot of the given tab, compress it to a roughly 1 MB png image, and return the path where the screenshot is saved")
+     async def get_tab_screenshot(browser_port: int, tab_id: str) -> dict[str, Any]:
+         _browser = browser_manager.get_browser(browser_port)
+         target_tab = _browser.get_tab(tab_id)
+         if not os.path.exists(html_source_code_local_save_path):
+             os.makedirs(html_source_code_local_save_path)
+         timestamp = int(time.time() * 1000)
+         origin_png = target_tab.get_screenshot(as_bytes="png")
+         compress_png = compress_image_bytes(origin_png)
+         image_path = os.path.join(html_source_code_local_save_path, f"{browser_port}_{tab_id}_{timestamp}.png")
+         with open(image_path, "wb") as f:
+             f.write(compress_png)
+         return dp_mcp_message_pack(
+             message=f"Finished taking a screenshot for browser_port={browser_port}, tab_id={tab_id}",
+             browser_port=browser_port,
+             tab_id=tab_id,
+             screenshot_path=image_path
+         )
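
One detail worth flagging: compress_image_bytes (added to tools.tools later in this diff) re-encodes with img.save(..., 'JPEG', ...), so the bytes this tool writes to the .png path are actually JPEG data, despite the extension and the "png" wording in the tool description. A minimal sketch of the compression step in isolation, assuming Pillow is installed; the input filename is illustrative:

    from tools.tools import compress_image_bytes

    with open("raw_screenshot.png", "rb") as f:  # hypothetical input file
        origin_png = f.read()
    # Re-encodes as JPEG and steps quality/dimensions down toward the 1 MB target
    compressed = compress_image_bytes(origin_png, target_size_mb=1)
    print(f"{len(origin_png)} -> {len(compressed)} bytes")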
@@ -20,6 +20,7 @@ if "TeamNode-Dp" in enabled_modules:
      register_get_html(mcp, browser_manager)
      register_check_selector(mcp, browser_manager)
      register_pop_first_packet(mcp, browser_manager, client_manager)
+     register_get_screenshot(mcp, browser_manager)
      # Page interaction
      register_click_action(mcp, browser_manager)
      register_scroll_action(mcp, browser_manager)
@@ -29,7 +30,7 @@ if "JarvisNode" in enabled_modules:
 
 
  def main():
-     mcp.run(transport="stdio",show_banner=False)
+     mcp.run(transport="stdio", show_banner=False)
 
 
  if __name__ == '__main__':
@@ -1,13 +1,14 @@
  [project]
  name = "Jarvis_Brain" # the name others use when installing; must be unique on PyPI
- version = "0.1.9.15"
+ version = "0.1.11.2"
  description = "Jarvis brain mcp"
  dependencies = [
      "fastmcp",
      "DrissionPage",
      "minify-html",
      "beautifulsoup4",
-     "curl_cffi"
+     "curl_cffi",
+     "pillow"
  ]
  requires-python = ">=3.10"
 
@@ -126,6 +126,51 @@ class DrissionPageListenerProxy:
          return attr
 
 
+ def check_data_packet(packet: DataPacket, client: DPProxyClient):
+     """
+     Wrap each captured data packet and store it in the client's packet_queue
+     :param packet:
+     :param client:
+     :return:
+     """
+     url = packet.url
+     method = packet.request.method
+     data = None
+     if packet.request.hasPostData:
+         data = packet.request.postData
+     domain = urlparse(url).netloc
+     body = packet.response.body
+     body_str = json.dumps(body, ensure_ascii=False, separators=(',', ':'))
+     body_str_list = [body_str[i:i + one_turn_max_token] for i in range(0, len(body_str), one_turn_max_token)]
+     body_completed = True
+     packet_filter = client.packet_filter
+     domain_filter = packet_filter.get("domain_filter", None)
+     method_filter = packet_filter.get("method_filter", ["GET", "POST"])
+     for index, body_str in enumerate(body_str_list):
+         # Skip the packet when a domain_filter is given and the domain is not in it
+         if domain_filter and domain not in domain_filter:
+             continue
+         # Skip the packet when the method is not in method_filter
+         if method not in method_filter:
+             continue
+         if (index + 1) != len(body_str_list):
+             body_completed = False
+         if packet.response:
+             response_headers = packet.response.headers
+         else:
+             response_headers = {}
+         temp_dict = {
+             "url": url,
+             "body_completed": body_completed,
+             "method": method,
+             "request_data": data,
+             "request_headers": dict(packet.request.headers),
+             "response_headers": dict(response_headers),
+             "response_body_segment": body_str.replace("\\", ""),
+         }
+         client.packet_queue.append(temp_dict)
+
+
  def check_data_packet(packet: DataPacket, client: DPProxyClient):
      """
      Wrap each captured data packet and store it in the client's packet_queue
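
Note that this hunk inserts a second check_data_packet definition ahead of the existing one, so at import time the later (original) definition still wins. The segmenting idiom both copies share splits the serialized response body into fixed-size slices; a self-contained sketch of that slicing, with a hypothetical segment size:

    # hypothetical segment size; the package reads one_turn_max_token from module scope
    one_turn_max_token = 8
    body_str = "abcdefghijklmnopqrstu"
    body_str_list = [body_str[i:i + one_turn_max_token]
                     for i in range(0, len(body_str), one_turn_max_token)]
    print(body_str_list)  # ['abcdefgh', 'ijklmnop', 'qrstu']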
@@ -0,0 +1,267 @@
+ import time
+ import random
+ import os
+ import minify_html
+ from DrissionPage import ChromiumPage, ChromiumOptions
+ from bs4 import BeautifulSoup
+ from curl_cffi import requests
+ from lxml import html, etree
+ import base64
+ from PIL import Image
+ import io
+
+ compress_html_js = """
+ function getSimplifiedDOM(node) {
+     // 1. Handle text nodes
+     if (node.nodeType === Node.TEXT_NODE) {
+         const text = node.textContent.trim();
+         // Cap the text length so long passages don't burn tokens; the first 100 characters are usually enough to locate an element
+         return text ? text.slice(0, 100) + (text.length > 100 ? '...' : '') : null;
+     }
+
+     // 2. Filter out useless tags
+     const ignoreTags = ['SCRIPT', 'STYLE', 'NOSCRIPT', 'IFRAME', 'SVG', 'LINK', 'META'];
+     if (ignoreTags.includes(node.tagName)) return null;
+     if (node.nodeType !== Node.ELEMENT_NODE) return null;
+
+     // 3. Filter out invisible elements
+     const style = window.getComputedStyle(node);
+     if (style.display === 'none' || style.visibility === 'hidden' || style.opacity === '0') return null;
+     // Filter out elements with near-zero size (usually blank tracking pixels)
+     const rect = node.getBoundingClientRect();
+     if (rect.width === 0 || rect.height === 0) return null;
+
+     // --- Start building the tag string ---
+     const tagName = node.tagName.toLowerCase();
+     let tagStr = tagName;
+
+     // A. Basic identifiers (ID and class)
+     if (node.id) tagStr += `#${node.id}`;
+     if (node.className && typeof node.className === 'string') {
+         // Overly long, non-semantic classes (e.g. Tailwind) could be filtered out, keeping only meaningful business classes;
+         // keep them all for simplicity here and let the LLM judge
+         const classes = node.className.trim().split(/\s+/);
+         if (classes.length > 0) tagStr += `.${classes.join('.')}`;
+     }
+
+     // B. Whitelist of key attributes (the core fix for the issue you pointed out)
+     const props = [];
+
+     // Generally important attributes
+     if (node.getAttribute('role')) props.push(`role="${node.getAttribute('role')}"`);
+     if (node.getAttribute('aria-label')) props.push(`aria-label="${node.getAttribute('aria-label')}"`);
+     if (node.getAttribute('title')) props.push(`title="${node.getAttribute('title')}"`);
+
+     // Tag-specific attributes
+     if (tagName === 'a') {
+         const href = node.getAttribute('href');
+         // Keep only meaningful links; ignore javascript:;
+         if (href && !href.startsWith('javascript')) props.push(`href="${href}"`);
+     } else if (tagName === 'input' || tagName === 'textarea' || tagName === 'select') {
+         if (node.getAttribute('type')) props.push(`type="${node.getAttribute('type')}"`);
+         if (node.getAttribute('name')) props.push(`name="${node.getAttribute('name')}"`);
+         if (node.getAttribute('placeholder')) props.push(`placeholder="${node.getAttribute('placeholder')}"`);
+         if (node.disabled) props.push('disabled');
+         if (node.checked) props.push('checked');
+     } else if (tagName === 'button') {
+         if (node.getAttribute('type')) props.push(`type="${node.getAttribute('type')}"`);
+     } else if (tagName === 'img') {
+         if (node.getAttribute('alt')) props.push(`alt="${node.getAttribute('alt')}"`);
+     }
+
+     if (props.length > 0) {
+         tagStr += ` ${props.join(' ')}`;
+     }
+
+     // 4. Recurse into child nodes
+     const children = Array.from(node.childNodes)
+         .map(getSimplifiedDOM)
+         .filter(n => n !== null);
+
+     // 5. Assemble the output
+     // If a node has no children, no ID/class, and is not an input/image/link, it is probably a layout-only div
+     // and could be skipped in favor of its children's content, but keep it for now to preserve the structure
+     if (children.length === 0) {
+         // Self-closing or empty tag
+         return `<${tagStr} />`;
+     }
+     return `<${tagStr}>${children.join('')}</${tagName}>`; // keep only tagName in the closing tag to save tokens
+ }
+
+ return getSimplifiedDOM(document.body);
+ """
+
+
+ # Fetch HTML with requests, used to test whether Ruishu or JSL is in play
+ def requests_html(url):
+     headers = {
+         "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/142.0.0.0 Safari/537.36",
+     }
+     response = requests.get(url, headers=headers, verify=False)
+     response.encoding = "utf-8"
+     return response.text, response.status_code
+
+
+ # Fetch HTML with DrissionPage in headless mode, used to test for other WAFs such as mobile WAFs
+ def dp_headless_html(url):
+     opt = ChromiumOptions().headless(True)
+     opt.set_argument('--no-sandbox')
+     """Create a new browser instance"""
+     random_port = random.randint(9934, 10034)
+     custom_data_dir = os.path.join(os.path.expanduser('~'), 'DrissionPage', "userData", f"{random_port}")
+     opt.set_user_data_path(custom_data_dir)  # set the user data path
+     opt.set_local_port(random_port)
+     page = ChromiumPage(opt)
+     tab = page.latest_tab
+     tab.get(url)
+     # todo: no better way for now; hard-wait so the data renders fully (heavily affected by network speed fluctuations)
+     time.sleep(10)
+     page_html = tab.html
+     # Always remember to quit the headless browser at page level after use
+     page.quit()
+     return page_html
+
+
+ # Compress HTML
+ def compress_html(content, only_text=False):
+     doc = html.fromstring(content)
+     # Remove style and script tags
+     for element in doc.xpath('//style | //script'):
+         element.getparent().remove(element)
+
+     # Remove stylesheet link tags
+     for link in doc.xpath('//link[@rel="stylesheet"]'):
+         link.getparent().remove(link)
+
+     # Remove meta tags (newly added)
+     for meta in doc.xpath('//meta'):
+         meta.getparent().remove(meta)
+
+     for svg in doc.xpath('//svg'):
+         # Grab the text content inside the SVG
+         text_content = svg.text_content()
+         # Replace the SVG with a new text node
+         parent = svg.getparent()
+         if parent is not None:
+             parent.text = (parent.text or '') + text_content
+             parent.remove(svg)
+
+     # Remove style attributes
+     for element in doc.xpath('//*[@style]'):
+         element.attrib.pop('style')
+
+     # Remove all on* event attributes
+     for element in doc.xpath('//*'):
+         for attr in list(element.attrib.keys()):
+             if attr.startswith('on'):
+                 element.attrib.pop(attr)
+
+     result = etree.tostring(doc, encoding='unicode')
+     result = minify_html.minify(result)
+     compress_rate = round(len(content) / len(result) * 100)
+     print(f"HTML compression ratio => {compress_rate}%")
+     if not only_text:
+         return result, compress_rate
+     soup = BeautifulSoup(result, 'html.parser')
+     result = soup.get_text(strip=True)
+     return result, compress_rate
+
+
+ # Detect a WAF from cookies; the cookie heuristics need continual refinement as new cases come up
+ def assert_waf_cookie(cookies: list):
+     for cookie in cookies:
+         cookie_name = cookie['name']
+         cookie_value = cookie['value']
+         if len(cookie_name) == 13 and len(cookie_value) == 88:
+             return True, "Ruishu"
+         if "_jsl" in cookie_name:
+             return True, "Jiasule"
+     return False, "no WAF"
+
+
+ # Pack a message for dp_mcp
+ def dp_mcp_message_pack(message: str, **kwargs):
+     text_obj = {key: value for key, value in kwargs.items()}
+     text_obj.update({"message": message})
+     return {
+         "content": [{
+             "type": "text",
+             # "text": json.dumps(text_obj, ensure_ascii=False)
+             "text": text_obj
+         }]
+     }
+
+
+ def btyes2Base64Img(target_byte):
+     """
+     Convert bytes to base64, used for transferring images
+     :param target_byte:
+     :return:
+     """
+     return "data:image/png;base64," + base64.b64encode(target_byte).decode()
+
+
+ def compress_image_bytes(input_bytes, target_size_mb=1):
+     """
+     Compress image byte data down to a target size
+
+     Args:
+         input_bytes: byte data of the input image
+         target_size_mb: target size in MB, default 1 MB
+
+     Returns:
+         the compressed image bytes
+     """
+     target_size = target_size_mb * 1024 * 1024  # convert to bytes
+
+     # Open the image from the byte data
+     img = Image.open(io.BytesIO(input_bytes))
+
+     # Convert RGBA/paletted images (PNG and the like) to RGB
+     if img.mode in ('RGBA', 'LA', 'P'):
+         img = img.convert('RGB')
+
+     # Initial quality setting
+     quality = 95
+
+     # First compression attempt
+     output_buffer = io.BytesIO()
+     img.save(output_buffer, 'JPEG', quality=quality, optimize=True)
+     output_bytes = output_buffer.getvalue()
+
+     # If the file is still too large, lower the quality step by step
+     while len(output_bytes) > target_size and quality > 10:
+         quality -= 5
+         output_buffer = io.BytesIO()
+         img.save(output_buffer, 'JPEG', quality=quality, optimize=True)
+         output_bytes = output_buffer.getvalue()
+
+     # If lowering the quality is not enough, try shrinking the dimensions
+     if len(output_bytes) > target_size:
+         width, height = img.size
+
+         while len(output_bytes) > target_size and quality > 10:
+             # Shrink by 10%
+             width = int(width * 0.9)
+             height = int(height * 0.9)
+             img_resized = img.resize((width, height), Image.Resampling.LANCZOS)
+             output_buffer = io.BytesIO()
+             img_resized.save(output_buffer, 'JPEG', quality=quality, optimize=True)
+             output_bytes = output_buffer.getvalue()
+
+     final_size = len(output_bytes) / (1024 * 1024)
+     # print(f"Compression done!")
+     # print(f"Original size: {len(input_bytes) / (1024 * 1024):.2f}MB")
+     # print(f"Compressed size: {final_size:.2f}MB")
+     # print(f"Final quality: {quality}")
+
+     return output_bytes
+
+ # todo: sketch out the various decision rules (all gaps between compression ratios below are absolute values)
+ # 1. If the compression ratios obtained via requests, headless, and headed differ from one another by less than 15%, treat the page as static and prefer plain requests
+ # 2. If the requests status_code is the telltale 412 or 521, conclude Ruishu or JSL. [Another symptom: the requests compression ratio will differ wildly from the other two (on the order of one or two thousand)]
+ # 3. If the compression ratios from requests, headless, and headed all differ by more than 40%, conclude the page can only be scraped with a headed browser
+ # 4. If the headless and headed ratios differ by less than 15% but requests and headless differ by more than 40%, conclude the page can be scraped with a headless browser
+ # 5. If the requests and headed ratios differ by less than 15% but headless and headed differ by more than 40%, prefer a headed browser
+ #    [Possible causes: 1. some other WAF that detects headless browsers; 2. the site runs Ruishu but this particular request was not blocked (perhaps deliberate on Ruishu's side);
+ #    to pin down Ruishu in that case, grab the cookies with a headed browser; if a Ruishu cookie is present, it is definitely Ruishu]
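
The todo above describes a complete decision procedure, so a sketch may help. This is a hypothetical illustration of those five rules, not code from the package (all names are invented; the rates are the percentage values printed by compress_html):

    def choose_fetch_strategy(req_rate, headless_rate, headed_rate, status_code):
        """Map compression-ratio gaps (absolute values) to a scraping strategy."""
        if status_code in (412, 521):
            return "ruishu-or-jsl"                  # rule 2
        def gap(a, b):
            return abs(a - b)
        if max(gap(req_rate, headless_rate),
               gap(req_rate, headed_rate),
               gap(headless_rate, headed_rate)) <= 15:
            return "requests"                       # rule 1: static page
        if gap(headless_rate, headed_rate) < 15 and gap(req_rate, headless_rate) > 40:
            return "headless"                       # rule 4
        if gap(req_rate, headed_rate) < 15 and gap(headless_rate, headed_rate) > 40:
            return "headed"                         # rule 5: likely a headless-detecting WAF
        return "headed"                             # rule 3: only headed collection works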
@@ -1,117 +0,0 @@
- import time
- import random
- import os
- import minify_html
- from DrissionPage import ChromiumPage, ChromiumOptions
- from bs4 import BeautifulSoup
- from curl_cffi import requests
- from lxml import html, etree
-
-
- # 使用requests获取html,用于测试是否使用了瑞数和jsl
12
- def requests_html(url):
13
- headers = {
14
- "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/142.0.0.0 Safari/537.36",
15
- }
16
- response = requests.get(url, headers=headers, verify=False)
17
- response.encoding = "utf-8"
18
- return response.text, response.status_code
19
-
20
-
21
- # 使用dp无头模式获取html,用于测试是否使用了其他waf,如移动waf
22
- def dp_headless_html(url):
23
- opt = ChromiumOptions().headless(True)
24
- opt.set_argument('--no-sandbox')
25
- """创建新的浏览器实例"""
26
- random_port = random.randint(9934, 10034)
27
- custom_data_dir = os.path.join(os.path.expanduser('~'), 'DrissionPage', "userData", f"{random_port}")
28
- opt.set_user_data_path(custom_data_dir) # 设置用户数据路径
29
- opt.set_local_port(random_port)
30
- page = ChromiumPage(opt)
31
- tab = page.latest_tab
32
- tab.get(url)
33
- # todo: 目前没有更好的方式,为了数据渲染完全,只能硬等【受网速波动影响比较大】
34
- time.sleep(10)
35
- page_html = tab.html
36
- # 无头浏览器在用完之后一定要记得再page级别进行quit
37
- page.quit()
38
- return page_html
39
-
40
-
41
- # 压缩html
42
- def compress_html(content, only_text=False):
43
- doc = html.fromstring(content)
44
- # 删除 style 和 script 标签
45
- for element in doc.xpath('//style | //script'):
46
- element.getparent().remove(element)
47
-
48
- # 删除 link 标签
49
- for link in doc.xpath('//link[@rel="stylesheet"]'):
50
- link.getparent().remove(link)
51
-
52
- # 删除 meta 标签(新增功能)
53
- for meta in doc.xpath('//meta'):
54
- meta.getparent().remove(meta)
55
-
56
- for svg in doc.xpath('//svg'):
57
- # 获取 SVG 内的文本内容
58
- text_content = svg.text_content()
59
- # 创建一个新的文本节点替换 SVG
60
- parent = svg.getparent()
61
- if parent is not None:
62
- parent.text = (parent.text or '') + text_content
63
- parent.remove(svg)
64
-
65
- # 删除 style 属性
66
- for element in doc.xpath('//*[@style]'):
67
- element.attrib.pop('style')
68
-
69
- # 删除所有 on* 事件属性
70
- for element in doc.xpath('//*'):
71
- for attr in list(element.attrib.keys()):
72
- if attr.startswith('on'):
73
- element.attrib.pop(attr)
74
-
75
- result = etree.tostring(doc, encoding='unicode')
76
- result = minify_html.minify(result)
77
- compress_rate = round(len(content) / len(result) * 100)
78
- print(f"html压缩比=> {compress_rate}%")
79
- if not only_text:
80
- return result, compress_rate
81
- soup = BeautifulSoup(result, 'html.parser')
82
- result = soup.get_text(strip=True)
83
- return result, compress_rate
84
-
85
-
86
- # 通过cookie判断是否有waf,需要通过遇到的例子,不断的完善cookie判别函数
87
- def assert_waf_cookie(cookies: list):
88
- for cookie in cookies:
89
- cookie_name = cookie['name']
90
- cookie_value = cookie['value']
91
- if len(cookie_name) == 13 and len(cookie_value) == 88:
92
- return True, "瑞数"
93
- if "_jsl" in cookie_name:
94
- return True, "加速乐"
95
- return False, "没有waf"
96
-
97
-
98
- # 对dp_mcp的消息打包
99
- def dp_mcp_message_pack(message: str, **kwargs):
100
- text_obj = {key: value for key, value in kwargs.items()}
101
- text_obj.update({"message": message})
102
- return {
103
- "content": [{
104
- "type": "text",
105
- # "text": json.dumps(text_obj, ensure_ascii=False)
106
- "text": text_obj
107
- }]
108
- }
109
-
110
- # todo: 大致盘一下各种判定的逻辑【以下的所有压缩比之间的差距均取“绝对值”】
111
- # 1. 如果requests、无头、有头获取到的压缩比之间从差距都在15%以内,则认定该页面是静态页面,此时优先使用requests请求
112
- # 2. 如果requests的status_code为特定的412,或者521,则判定是瑞数和jsl。[此时还有一个特点:requests的压缩比会与其他两种方式获取到的压缩比差距非常大(一两千的那种)]
113
- # 3. 如果requests、无头、有头获取到的压缩比之间差距都在40%以上,则判定该页面只可以用有头采集
114
- # 4. 如果无头和有头获取到的压缩比之间差距小于15%,但是requests和无头的差距大于40%,则认定该页面可以使用无头浏览器采集
115
- # 5. 如果requests和有头获取到的压缩比之间差距小于15%,但是无头和有头的差距大于40%,则认定该页面优先使用有头浏览器采集
116
- # 【此时可能是:1.使用了别的检测无头的waf。2.网站使用瑞数,但是这次请求没有拦截requests(不知道是不是瑞数那边故意设置的),
117
- # 此时如果想进一步判定是否是瑞数,可以使用有头浏览器取一下cookies,如果cookies里面存在瑞数的cookie,那么就可以断定是瑞数】