crawlo-1.4.3-py3-none-any.whl → crawlo-1.4.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.
Files changed (107)
  1. crawlo/__init__.py +11 -15
  2. crawlo/__version__.py +1 -1
  3. crawlo/commands/genspider.py +52 -17
  4. crawlo/commands/startproject.py +24 -0
  5. crawlo/core/engine.py +2 -2
  6. crawlo/core/scheduler.py +4 -4
  7. crawlo/crawler.py +13 -6
  8. crawlo/downloader/__init__.py +5 -2
  9. crawlo/extension/__init__.py +2 -2
  10. crawlo/filters/aioredis_filter.py +8 -1
  11. crawlo/filters/memory_filter.py +8 -1
  12. crawlo/initialization/built_in.py +13 -4
  13. crawlo/initialization/core.py +5 -4
  14. crawlo/interfaces.py +24 -0
  15. crawlo/middleware/__init__.py +7 -4
  16. crawlo/middleware/middleware_manager.py +15 -8
  17. crawlo/mode_manager.py +45 -11
  18. crawlo/network/response.py +374 -69
  19. crawlo/pipelines/mysql_pipeline.py +6 -6
  20. crawlo/pipelines/pipeline_manager.py +2 -2
  21. crawlo/project.py +2 -4
  22. crawlo/queue/pqueue.py +2 -6
  23. crawlo/queue/queue_manager.py +1 -2
  24. crawlo/settings/default_settings.py +15 -30
  25. crawlo/task_manager.py +2 -2
  26. crawlo/templates/project/items.py.tmpl +2 -2
  27. crawlo/templates/project/middlewares.py.tmpl +9 -89
  28. crawlo/templates/project/pipelines.py.tmpl +8 -68
  29. crawlo/templates/project/settings.py.tmpl +51 -65
  30. crawlo/templates/project/settings_distributed.py.tmpl +59 -67
  31. crawlo/templates/project/settings_gentle.py.tmpl +45 -40
  32. crawlo/templates/project/settings_high_performance.py.tmpl +45 -40
  33. crawlo/templates/project/settings_minimal.py.tmpl +37 -26
  34. crawlo/templates/project/settings_simple.py.tmpl +45 -40
  35. crawlo/templates/run.py.tmpl +3 -7
  36. crawlo/tools/__init__.py +0 -11
  37. crawlo/utils/__init__.py +17 -1
  38. crawlo/utils/db_helper.py +220 -319
  39. crawlo/utils/error_handler.py +313 -67
  40. crawlo/utils/fingerprint.py +3 -4
  41. crawlo/utils/misc.py +82 -0
  42. crawlo/utils/request.py +55 -66
  43. crawlo/utils/selector_helper.py +138 -0
  44. crawlo/utils/spider_loader.py +185 -45
  45. crawlo/utils/text_helper.py +95 -0
  46. crawlo-1.4.5.dist-info/METADATA +329 -0
  47. {crawlo-1.4.3.dist-info → crawlo-1.4.5.dist-info}/RECORD +89 -68
  48. tests/bug_check_test.py +251 -0
  49. tests/direct_selector_helper_test.py +97 -0
  50. tests/ofweek_scrapy/ofweek_scrapy/items.py +12 -0
  51. tests/ofweek_scrapy/ofweek_scrapy/middlewares.py +100 -0
  52. tests/ofweek_scrapy/ofweek_scrapy/pipelines.py +13 -0
  53. tests/ofweek_scrapy/ofweek_scrapy/settings.py +85 -0
  54. tests/ofweek_scrapy/ofweek_scrapy/spiders/__init__.py +4 -0
  55. tests/ofweek_scrapy/ofweek_scrapy/spiders/ofweek_spider.py +162 -0
  56. tests/ofweek_scrapy/scrapy.cfg +11 -0
  57. tests/performance_comparison.py +4 -5
  58. tests/simple_crawlo_test.py +1 -2
  59. tests/simple_follow_test.py +39 -0
  60. tests/simple_response_selector_test.py +95 -0
  61. tests/simple_selector_helper_test.py +155 -0
  62. tests/simple_selector_test.py +208 -0
  63. tests/simple_url_test.py +74 -0
  64. tests/test_crawler_process_import.py +39 -0
  65. tests/test_crawler_process_spider_modules.py +48 -0
  66. tests/test_edge_cases.py +7 -5
  67. tests/test_encoding_core.py +57 -0
  68. tests/test_encoding_detection.py +127 -0
  69. tests/test_factory_compatibility.py +197 -0
  70. tests/test_multi_directory.py +68 -0
  71. tests/test_multiple_spider_modules.py +81 -0
  72. tests/test_optimized_selector_naming.py +101 -0
  73. tests/test_priority_behavior.py +18 -18
  74. tests/test_response_follow.py +105 -0
  75. tests/test_response_selector_methods.py +93 -0
  76. tests/test_response_url_methods.py +71 -0
  77. tests/test_response_urljoin.py +87 -0
  78. tests/test_scrapy_style_encoding.py +113 -0
  79. tests/test_selector_helper.py +101 -0
  80. tests/test_selector_optimizations.py +147 -0
  81. tests/test_spider_loader.py +50 -0
  82. tests/test_spider_loader_comprehensive.py +70 -0
  83. tests/test_spider_modules.py +85 -0
  84. tests/test_spiders/__init__.py +1 -0
  85. tests/test_spiders/test_spider.py +10 -0
  86. crawlo/tools/anti_crawler.py +0 -269
  87. crawlo/utils/class_loader.py +0 -26
  88. crawlo/utils/enhanced_error_handler.py +0 -357
  89. crawlo-1.4.3.dist-info/METADATA +0 -190
  90. examples/test_project/__init__.py +0 -7
  91. examples/test_project/run.py +0 -35
  92. examples/test_project/test_project/__init__.py +0 -4
  93. examples/test_project/test_project/items.py +0 -18
  94. examples/test_project/test_project/middlewares.py +0 -119
  95. examples/test_project/test_project/pipelines.py +0 -97
  96. examples/test_project/test_project/settings.py +0 -170
  97. examples/test_project/test_project/spiders/__init__.py +0 -10
  98. examples/test_project/test_project/spiders/of_week_dis.py +0 -144
  99. tests/simple_log_test.py +0 -58
  100. tests/simple_test.py +0 -48
  101. tests/test_framework_logger.py +0 -67
  102. tests/test_framework_startup.py +0 -65
  103. tests/test_mode_change.py +0 -73
  104. {crawlo-1.4.3.dist-info → crawlo-1.4.5.dist-info}/WHEEL +0 -0
  105. {crawlo-1.4.3.dist-info → crawlo-1.4.5.dist-info}/entry_points.txt +0 -0
  106. {crawlo-1.4.3.dist-info → crawlo-1.4.5.dist-info}/top_level.txt +0 -0
  107. /tests/{final_command_test_report.md → ofweek_scrapy/ofweek_scrapy/__init__.py} +0 -0
tests/test_selector_optimizations.py
@@ -0,0 +1,147 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ """
+ Selector method optimization tests
+ """
+ import sys
+ import os
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
+
+ from crawlo.network.response import Response
+
+
+ def test_selector_optimizations():
+     """Test the optimized selector methods"""
+     print("Testing selector method optimizations...")
+     print("=" * 50)
+
+     # Build a reasonably complex HTML response
+     html_content = """
+     <html>
+     <head>
+         <title>Test page title</title>
+     </head>
+     <body>
+         <div class="content">
+             <h1>Main heading</h1>
+             <p class="intro">This is the intro paragraph</p>
+             <div class="article">
+                 <p>First paragraph <strong>bold text</strong> plain text</p>
+                 <p>Second paragraph <em>italic text</em></p>
+             </div>
+             <ul class="list">
+                 <li>Item 1</li>
+                 <li>Item 2</li>
+                 <li>Item 3</li>
+             </ul>
+             <a href="https://example.com" class="link">Link text</a>
+             <img src="image.jpg" alt="Image description" class="image">
+             <div class="products">
+                 <div class="product" data-id="1">
+                     <h2>Product A</h2>
+                     <p class="price">¥99.99</p>
+                 </div>
+                 <div class="product" data-id="2">
+                     <h2>Product B</h2>
+                     <p class="price">¥149.99</p>
+                 </div>
+             </div>
+         </div>
+     </body>
+     </html>
+     """
+
+     response = Response(
+         url="https://example.com/test",
+         body=html_content.encode('utf-8'),
+         headers={"content-type": "text/html; charset=utf-8"}
+     )
+
+     # Test the extract_text method
+     print("1. Testing extract_text:")
+     title = response.extract_text('title')
+     print(f"   Title: {title}")
+
+     h1_text = response.extract_text('.content h1')
+     print(f"   H1 text: {h1_text}")
+
+     # Test with XPath
+     title_xpath = response.extract_text('//title')
+     print(f"   XPath title: {title_xpath}")
+
+     # Test complex text extraction
+     complex_text = response.extract_text('.article p', join_str=' ')
+     print(f"   Complex text: {complex_text}")
+
+     print()
+
+     # Test the extract_texts method
+     print("2. Testing extract_texts:")
+     list_items = response.extract_texts('.list li')
+     print(f"   List items: {list_items}")
+
+     # Test with XPath
+     list_items_xpath = response.extract_texts('//ul[@class="list"]/li')
+     print(f"   XPath list items: {list_items_xpath}")
+
+     # Test multiple elements
+     product_names = response.extract_texts('.product h2')
+     print(f"   Product names: {product_names}")
+
+     product_prices = response.extract_texts('.price')
+     print(f"   Product prices: {product_prices}")
+
+     print()
+
+     # Test the extract_attr method
+     print("3. Testing extract_attr:")
+     link_href = response.extract_attr('.link', 'href')
+     print(f"   Link href: {link_href}")
+
+     img_alt = response.extract_attr('.image', 'alt')
+     print(f"   Image alt: {img_alt}")
+
+     # Test with XPath
+     link_href_xpath = response.extract_attr('//a[@class="link"]', 'href')
+     print(f"   XPath link href: {link_href_xpath}")
+
+     print()
+
+     # Test the extract_attrs method
+     print("4. Testing extract_attrs:")
+     product_ids = response.extract_attrs('.product', 'data-id')
+     print(f"   Product IDs: {product_ids}")
+
+     # Test with XPath
+     product_ids_xpath = response.extract_attrs('//div[@class="product"]', 'data-id')
+     print(f"   XPath product IDs: {product_ids_xpath}")
+
+     # Test all links
+     all_links = response.extract_attrs('a', 'href')
+     print(f"   All links: {all_links}")
+
+     print()
+
+     # Test edge cases
+     print("5. Testing edge cases:")
+     # Default values
+     non_exist = response.extract_text('.non-exist', default='default text')
+     print(f"   Default for missing element: {non_exist}")
+
+     non_exist_attr = response.extract_attr('.non-exist', 'href', default='default link')
+     print(f"   Default for missing attribute: {non_exist_attr}")
+
+     print()
+
+     # Test an empty response
+     print("6. Testing an empty response:")
+     empty_response = Response(url="https://example.com/empty", body=b"")
+     empty_text = empty_response.extract_text('title', default='default title')
+     print(f"   Default for empty response: {empty_text}")
+
+     print()
+     print("All tests complete!")
+
+
+ if __name__ == '__main__':
+     test_selector_optimizations()
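For context, a minimal sketch of how the Response selector helpers exercised above (extract_text, extract_texts, extract_attrs with their default/join_str parameters) might be used inside a spider callback. The spider name, selectors, and the yielded dict are illustrative assumptions; only the helper signatures come from the test itself, and start-URL configuration is omitted.

    from crawlo.spider import Spider


    class ProductDemoSpider(Spider):
        name = 'product_demo'  # hypothetical spider, for illustration only

        def parse(self, response):
            # First match as text, with a fallback when nothing matches
            page_title = response.extract_text('title', default='')
            # All matches, returned as lists of strings
            names = response.extract_texts('.product h2')
            prices = response.extract_texts('.product .price')
            # One attribute collected from every matching element
            ids = response.extract_attrs('.product', 'data-id')
            for product_id, name, price in zip(ids, names, prices):
                yield {'title': page_title, 'id': product_id, 'name': name, 'price': price}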
tests/test_spider_loader.py
@@ -0,0 +1,50 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+
+ """
+ Tests for SpiderLoader functionality
+ """
+
+ import sys
+ import os
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
+
+ from crawlo.utils.spider_loader import SpiderLoader
+ from crawlo.settings.setting_manager import SettingManager
+
+
+ def test_spider_loader():
+     """Test basic SpiderLoader functionality"""
+     print("Testing basic SpiderLoader functionality...")
+
+     # Create a simple settings object
+     settings = SettingManager({
+         'SPIDER_MODULES': ['tests.test_spiders'],
+         'SPIDER_LOADER_WARN_ONLY': True
+     })
+
+     # Create a SpiderLoader instance
+     loader = SpiderLoader.from_settings(settings)
+
+     # Test the list method
+     spider_names = loader.list()
+     print(f"Discovered spiders: {spider_names}")
+
+     # Test the load method
+     if spider_names:
+         spider_name = spider_names[0]
+         try:
+             spider_class = loader.load(spider_name)
+             print(f"Loaded spider: {spider_name} -> {spider_class}")
+         except KeyError as e:
+             print(f"Failed to load spider: {e}")
+
+     # Test the get_all method
+     all_spiders = loader.get_all()
+     print(f"All spiders: {list(all_spiders.keys())}")
+
+     print("Test complete!")
+
+
+ if __name__ == '__main__':
+     test_spider_loader()
tests/test_spider_loader_comprehensive.py
@@ -0,0 +1,70 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+
+ """
+ Comprehensive tests for SpiderLoader functionality
+ """
+
+ import sys
+ import os
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
+
+ from crawlo.utils.spider_loader import SpiderLoader
+ from crawlo.crawler import CrawlerProcess
+ from crawlo.settings.setting_manager import SettingManager
+
+
+ def test_spider_loader_comprehensive():
+     """Comprehensive test of SpiderLoader functionality"""
+     print("Running comprehensive SpiderLoader tests...")
+
+     # 1. Basic SpiderLoader functionality
+     print("\n1. Testing basic SpiderLoader functionality")
+     settings = SettingManager({
+         'SPIDER_MODULES': ['tests.test_spiders'],
+         'SPIDER_LOADER_WARN_ONLY': True
+     })
+
+     loader = SpiderLoader.from_settings(settings)
+     spider_names = loader.list()
+     print(f"   Discovered spiders: {spider_names}")
+
+     if spider_names:
+         spider_name = spider_names[0]
+         spider_class = loader.load(spider_name)
+         print(f"   Loaded spider: {spider_name} -> {spider_class}")
+
+     # 2. CrawlerProcess integration with SPIDER_MODULES
+     print("\n2. Testing CrawlerProcess integration with SPIDER_MODULES")
+     process = CrawlerProcess(settings=settings)
+     process_spider_names = process.get_spider_names()
+     print(f"   Spiders discovered by CrawlerProcess: {process_spider_names}")
+
+     is_registered = process.is_spider_registered('test_spider')
+     print(f"   Is spider 'test_spider' registered: {is_registered}")
+
+     spider_class = process.get_spider_class('test_spider')
+     print(f"   Class of spider 'test_spider': {spider_class}")
+
+     # 3. Interface contract
+     print("\n3. Testing the interface contract")
+     # Check that SpiderLoader implements the methods required by ISpiderLoader
+     from crawlo.interfaces import ISpiderLoader
+     # ISpiderLoader is a Protocol, so a plain isinstance check is not enough;
+     # verify that the required methods are implemented instead
+     required_methods = ['load', 'list', 'find_by_request']
+     implements_interface = all(hasattr(loader, method) for method in required_methods)
+     print(f"   SpiderLoader implements the ISpiderLoader interface: {implements_interface}")
+
+     # 4. Method existence
+     print("\n4. Testing method existence")
+     required_methods = ['load', 'list', 'find_by_request', 'get_all']
+     for method in required_methods:
+         has_method = hasattr(loader, method)
+         print(f"   SpiderLoader has a {method} method: {has_method}")
+
+     print("\nComprehensive tests complete!")
+
+
+ if __name__ == '__main__':
+     test_spider_loader_comprehensive()
tests/test_spider_modules.py
@@ -0,0 +1,85 @@
+ #!/usr/bin/env python3
+ # -*- coding: UTF-8 -*-
+ """
+ Tests for automatic spider discovery via the SPIDER_MODULES setting
+ """
+ import sys
+ import os
+ import asyncio
+
+ # Add the project root to the Python path
+ sys.path.insert(0, os.path.dirname(__file__))
+
+ # Add ofweek_standalone to the Python path
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'examples', 'ofweek_standalone'))
+
+ from crawlo.crawler import CrawlerProcess
+ from crawlo.spider import get_spider_names
+
+
+ def test_spider_modules_auto_discovery():
+     """Test automatic discovery via the SPIDER_MODULES setting"""
+     print("Testing automatic discovery via SPIDER_MODULES...")
+
+     # Import the project settings
+     import examples.ofweek_standalone.ofweek_standalone.settings as settings_module
+
+     # Create the settings manager
+     from crawlo.settings.setting_manager import SettingManager
+     settings = SettingManager()
+     settings.set_settings(settings_module)
+
+     # Create a CrawlerProcess without explicitly passing spider_modules
+     process = CrawlerProcess(settings=settings)
+
+     # Check whether spiders were registered automatically
+     spider_names = process.get_spider_names()
+     print(f"Registered spiders: {spider_names}")
+
+     # Verify that the expected spider is registered
+     expected_spider = 'of_week_standalone'
+     if expected_spider in spider_names:
+         print(f"✅ Success: spider '{expected_spider}' was registered automatically")
+         return True
+     else:
+         print(f"❌ Failure: spider '{expected_spider}' was not found")
+         return False
+
+
+ def test_crawler_process_with_explicit_spider_modules():
+     """Test passing the spider_modules argument explicitly"""
+     print("\nTesting an explicitly passed spider_modules argument...")
+
+     # Pass the spider_modules argument explicitly
+     spider_modules = ['ofweek_standalone.spiders']
+     process = CrawlerProcess(spider_modules=spider_modules)
+
+     # Check whether spiders were registered
+     spider_names = process.get_spider_names()
+     print(f"Registered spiders: {spider_names}")
+
+     # Verify that the expected spider is registered
+     expected_spider = 'of_week_standalone'
+     if expected_spider in spider_names:
+         print(f"✅ Success: spider '{expected_spider}' is registered")
+         return True
+     else:
+         print(f"❌ Failure: spider '{expected_spider}' was not found")
+         return False
+
+
+ if __name__ == '__main__':
+     print("Starting SPIDER_MODULES configuration tests...\n")
+
+     # Test auto-discovery
+     success1 = test_spider_modules_auto_discovery()
+
+     # Test the explicit argument
+     success2 = test_crawler_process_with_explicit_spider_modules()
+
+     if success1 and success2:
+         print("\n🎉 All tests passed!")
+         sys.exit(0)
+     else:
+         print("\n❌ Some tests failed!")
+         sys.exit(1)
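As a quick reference, a hedged sketch of the discovery flow these tests cover: point SPIDER_MODULES at a spiders package and let CrawlerProcess find the Spider subclasses. The package name 'myproject.spiders' is a placeholder, and actually starting a crawl is omitted because the run API is not shown in this diff.

    from crawlo.crawler import CrawlerProcess
    from crawlo.settings.setting_manager import SettingManager

    # Discovery driven by settings, as in test_spider_modules_auto_discovery above
    settings = SettingManager({'SPIDER_MODULES': ['myproject.spiders']})
    process = CrawlerProcess(settings=settings)
    print(process.get_spider_names())  # spiders found under SPIDER_MODULES

    # The same discovery with the modules passed explicitly
    explicit = CrawlerProcess(spider_modules=['myproject.spiders'])
    print(explicit.get_spider_names())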
tests/test_spiders/__init__.py
@@ -0,0 +1 @@
+ # Test spiders package
tests/test_spiders/test_spider.py
@@ -0,0 +1,10 @@
+ # -*- coding: utf-8 -*-
+
+ from crawlo.spider import Spider
+
+
+ class TestSpider(Spider):
+     name = 'test_spider'
+
+     def parse(self, response):
+         pass
crawlo/tools/anti_crawler.py (removed)
@@ -1,269 +0,0 @@
- #!/usr/bin/python
- # -*- coding: UTF-8 -*-
- """
- # @Time : 2025-09-10 22:00
- # @Author : crawl-coder
- # @Desc : Anti-crawling countermeasure utilities
- """
-
- import asyncio
- import random
- import time
- from typing import Dict, Any, Optional, List, Callable
-
-
- class ProxyPoolManager:
-     """Proxy pool manager"""
-
-     def __init__(self, proxies: Optional[List[Dict[str, str]]] = None):
-         """
-         Initialize the proxy pool manager
-
-         Args:
-             proxies (Optional[List[Dict[str, str]]]): list of proxies
-         """
-         self.proxies = proxies or [
-             {"http": "http://proxy1.example.com:8080", "https": "https://proxy1.example.com:8080"},
-             {"http": "http://proxy2.example.com:8080", "https": "https://proxy2.example.com:8080"},
-             {"http": "http://proxy3.example.com:8080", "https": "https://proxy3.example.com:8080"}
-         ]
-         self.proxy_status = {id(proxy): {"last_used": 0, "success_count": 0, "fail_count": 0}
-                              for proxy in self.proxies}
-
-     def get_random_proxy(self) -> Dict[str, str]:
-         """
-         Get a random proxy
-
-         Returns:
-             Dict[str, str]: proxy configuration
-         """
-         return random.choice(self.proxies)
-
-     def get_best_proxy(self) -> Dict[str, str]:
-         """
-         Get the best proxy by success rate
-
-         Returns:
-             Dict[str, str]: proxy configuration
-         """
-         if not self.proxy_status:
-             return self.get_random_proxy()
-
-         # Compute the success rate of each proxy
-         proxy_scores = []
-         for proxy in self.proxies:
-             proxy_id = id(proxy)
-             status = self.proxy_status.get(proxy_id, {"success_count": 0, "fail_count": 0})
-             total = status["success_count"] + status["fail_count"]
-
-             if total == 0:
-                 score = 0.5  # default success rate
-             else:
-                 score = status["success_count"] / total
-
-             proxy_scores.append((proxy, score))
-
-         # Sort by success rate and return the best proxy
-         proxy_scores.sort(key=lambda x: x[1], reverse=True)
-         return proxy_scores[0][0]
-
-     def report_proxy_result(self, proxy: Dict[str, str], success: bool) -> None:
-         """
-         Report the result of using a proxy
-
-         Args:
-             proxy (Dict[str, str]): proxy configuration
-             success (bool): whether the request succeeded
-         """
-         proxy_id = id(proxy)
-         if proxy_id not in self.proxy_status:
-             self.proxy_status[proxy_id] = {"last_used": 0, "success_count": 0, "fail_count": 0}
-
-         status = self.proxy_status[proxy_id]
-         status["last_used"] = time.time()
-
-         if success:
-             status["success_count"] += 1
-         else:
-             status["fail_count"] += 1
-
-     def remove_invalid_proxy(self, proxy: Dict[str, str]) -> None:
-         """
-         Remove an invalid proxy
-
-         Args:
-             proxy (Dict[str, str]): proxy configuration
-         """
-         if proxy in self.proxies:
-             self.proxies.remove(proxy)
-             proxy_id = id(proxy)
-             if proxy_id in self.proxy_status:
-                 del self.proxy_status[proxy_id]
-
-
- class CaptchaHandler:
-     """Captcha handler"""
-
-     def __init__(self, captcha_service: Optional[Callable] = None):
-         """
-         Initialize the captcha handler
-
-         Args:
-             captcha_service (Optional[Callable]): captcha recognition service
-         """
-         self.captcha_service = captcha_service
-
-     async def recognize_captcha(self, image_data: bytes,
-                                 captcha_type: str = "image") -> Optional[str]:
-         """
-         Recognize a captcha
-
-         Args:
-             image_data (bytes): captcha image data
-             captcha_type (str): captcha type
-
-         Returns:
-             Optional[str]: recognition result
-         """
-         if self.captcha_service:
-             try:
-                 return await self.captcha_service(image_data, captcha_type)
-             except Exception:
-                 return None
-         else:
-             # Return None when no captcha service is configured
-             return None
-
-     async def handle_manual_captcha(self, prompt: str = "Please enter the captcha: ") -> str:
-         """
-         Handle manual captcha input
-
-         Args:
-             prompt (str): prompt message
-
-         Returns:
-             str: the captcha entered by the user
-         """
-         # In a real application this would need to interact with a user interface;
-         # for demonstration purposes, simulate user input
-         print(prompt)
-         return input() if not asyncio.get_event_loop().is_running() else ""
-
-
- class AntiCrawler:
-     """Anti-crawling countermeasure toolkit"""
-
-     def __init__(self, proxies: Optional[List[Dict[str, str]]] = None,
-                  captcha_service: Optional[Callable] = None):
-         """
-         Initialize the anti-crawling toolkit
-
-         Args:
-             proxies (Optional[List[Dict[str, str]]]): list of proxies
-             captcha_service (Optional[Callable]): captcha recognition service
-         """
-         self.proxy_manager = ProxyPoolManager(proxies)
-         self.captcha_handler = CaptchaHandler(captcha_service)
-
-     def get_random_user_agent(self) -> str:
-         """
-         Get a random User-Agent
-
-         Returns:
-             str: a random User-Agent
-         """
-         user_agents = [
-             "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
-             "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
-             "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0",
-             "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15",
-             "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Edge/91.0.864.59",
-             "Mozilla/5.0 (iPhone; CPU iPhone OS 14_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Mobile/15E148 Safari/604.1",
-             "Mozilla/5.0 (Linux; Android 11; Pixel 5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Mobile Safari/537.36"
-         ]
-         return random.choice(user_agents)
-
-     def rotate_proxy(self) -> Dict[str, str]:
-         """
-         Rotate to another proxy
-
-         Returns:
-             Dict[str, str]: proxy configuration
-         """
-         return self.proxy_manager.get_best_proxy()
-
-     def handle_captcha(self, response_text: str) -> bool:
-         """
-         Detect whether a captcha was encountered
-
-         Args:
-             response_text (str): response text
-
-         Returns:
-             bool: whether a captcha was encountered
-         """
-         captcha_keywords = ["captcha", "verify", "验证", "验证码", "human verification"]
-         return any(keyword in response_text.lower() for keyword in captcha_keywords)
-
-     def detect_rate_limiting(self, status_code: int, response_headers: Dict[str, Any]) -> bool:
-         """
-         Detect whether rate limiting was encountered
-
-         Args:
-             status_code (int): HTTP status code
-             response_headers (Dict[str, Any]): response headers
-
-         Returns:
-             bool: whether rate limiting was encountered
-         """
-         # Check the status code
-         if status_code in [429, 503]:
-             return True
-
-         # Check the response headers
-         rate_limit_headers = ["x-ratelimit-remaining", "retry-after", "x-ratelimit-reset"]
-         return any(header.lower() in [k.lower() for k in response_headers.keys()]
-                    for header in rate_limit_headers)
-
-     def random_delay(self, min_delay: float = 1.0, max_delay: float = 3.0) -> None:
-         """
-         Sleep for a random delay to avoid overly frequent requests
-
-         Args:
-             min_delay (float): minimum delay in seconds
-             max_delay (float): maximum delay in seconds
-         """
-         delay = random.uniform(min_delay, max_delay)
-         time.sleep(delay)
-
-     async def async_random_delay(self, min_delay: float = 1.0, max_delay: float = 3.0) -> None:
-         """
-         Asynchronously sleep for a random delay to avoid overly frequent requests
-
-         Args:
-             min_delay (float): minimum delay in seconds
-             max_delay (float): maximum delay in seconds
-         """
-         delay = random.uniform(min_delay, max_delay)
-         await asyncio.sleep(delay)
-
-
- # Convenience functions
- def get_random_user_agent() -> str:
-     """Get a random User-Agent"""
-     return AntiCrawler().get_random_user_agent()
-
-
- def rotate_proxy(proxies: Optional[List[Dict[str, str]]] = None) -> Dict[str, str]:
-     """Rotate to another proxy"""
-     return AntiCrawler(proxies).rotate_proxy()
-
-
- def handle_captcha(response_text: str) -> bool:
-     """Detect whether a captcha was encountered"""
-     return AntiCrawler().handle_captcha(response_text)
-
-
- def detect_rate_limiting(status_code: int, response_headers: Dict[str, Any]) -> bool:
-     """Detect whether rate limiting was encountered"""
-     return AntiCrawler().detect_rate_limiting(status_code, response_headers)
@@ -1,26 +0,0 @@
1
- # -*- coding: UTF-8 -*-
2
- """
3
- 类加载器工具模块
4
- ==============
5
- 提供动态类加载功能,避免循环依赖问题。
6
- """
7
- import importlib
8
- from typing import Any
9
-
10
-
11
- def load_class(path: str) -> Any:
12
- """
13
- 动态加载类
14
-
15
- Args:
16
- path: 类的完整路径,如 'package.module.ClassName'
17
-
18
- Returns:
19
- 加载的类对象
20
- """
21
- try:
22
- module_path, class_name = path.rsplit('.', 1)
23
- module = importlib.import_module(module_path)
24
- return getattr(module, class_name)
25
- except (ValueError, ImportError, AttributeError) as e:
26
- raise ImportError(f"无法加载类 '{path}': {e}")