crawlo 1.1.0__py3-none-any.whl → 1.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (120)
  1. crawlo/__init__.py +34 -24
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +40 -40
  4. crawlo/commands/__init__.py +13 -13
  5. crawlo/commands/check.py +594 -155
  6. crawlo/commands/genspider.py +152 -111
  7. crawlo/commands/list.py +156 -119
  8. crawlo/commands/run.py +285 -170
  9. crawlo/commands/startproject.py +196 -101
  10. crawlo/commands/stats.py +188 -167
  11. crawlo/commands/utils.py +187 -0
  12. crawlo/config.py +280 -0
  13. crawlo/core/__init__.py +2 -2
  14. crawlo/core/engine.py +171 -158
  15. crawlo/core/enhanced_engine.py +190 -0
  16. crawlo/core/processor.py +40 -40
  17. crawlo/core/scheduler.py +162 -57
  18. crawlo/crawler.py +1028 -493
  19. crawlo/downloader/__init__.py +242 -78
  20. crawlo/downloader/aiohttp_downloader.py +212 -199
  21. crawlo/downloader/cffi_downloader.py +252 -277
  22. crawlo/downloader/httpx_downloader.py +257 -246
  23. crawlo/event.py +11 -11
  24. crawlo/exceptions.py +78 -78
  25. crawlo/extension/__init__.py +31 -31
  26. crawlo/extension/log_interval.py +49 -49
  27. crawlo/extension/log_stats.py +44 -44
  28. crawlo/extension/logging_extension.py +34 -34
  29. crawlo/filters/__init__.py +154 -37
  30. crawlo/filters/aioredis_filter.py +242 -150
  31. crawlo/filters/memory_filter.py +269 -202
  32. crawlo/items/__init__.py +23 -23
  33. crawlo/items/base.py +21 -21
  34. crawlo/items/fields.py +53 -53
  35. crawlo/items/items.py +104 -104
  36. crawlo/middleware/__init__.py +21 -21
  37. crawlo/middleware/default_header.py +32 -32
  38. crawlo/middleware/download_delay.py +28 -28
  39. crawlo/middleware/middleware_manager.py +135 -135
  40. crawlo/middleware/proxy.py +248 -245
  41. crawlo/middleware/request_ignore.py +30 -30
  42. crawlo/middleware/response_code.py +18 -18
  43. crawlo/middleware/response_filter.py +26 -26
  44. crawlo/middleware/retry.py +125 -90
  45. crawlo/mode_manager.py +201 -0
  46. crawlo/network/__init__.py +21 -7
  47. crawlo/network/request.py +311 -203
  48. crawlo/network/response.py +269 -166
  49. crawlo/pipelines/__init__.py +13 -13
  50. crawlo/pipelines/console_pipeline.py +39 -39
  51. crawlo/pipelines/csv_pipeline.py +317 -0
  52. crawlo/pipelines/json_pipeline.py +219 -0
  53. crawlo/pipelines/mongo_pipeline.py +116 -116
  54. crawlo/pipelines/mysql_pipeline.py +195 -195
  55. crawlo/pipelines/pipeline_manager.py +56 -56
  56. crawlo/project.py +153 -0
  57. crawlo/queue/pqueue.py +37 -0
  58. crawlo/queue/queue_manager.py +304 -0
  59. crawlo/queue/redis_priority_queue.py +192 -0
  60. crawlo/settings/__init__.py +7 -7
  61. crawlo/settings/default_settings.py +226 -169
  62. crawlo/settings/setting_manager.py +99 -99
  63. crawlo/spider/__init__.py +639 -129
  64. crawlo/stats_collector.py +59 -59
  65. crawlo/subscriber.py +106 -106
  66. crawlo/task_manager.py +30 -27
  67. crawlo/templates/crawlo.cfg.tmpl +10 -10
  68. crawlo/templates/project/__init__.py.tmpl +3 -3
  69. crawlo/templates/project/items.py.tmpl +17 -17
  70. crawlo/templates/project/middlewares.py.tmpl +87 -76
  71. crawlo/templates/project/pipelines.py.tmpl +336 -64
  72. crawlo/templates/project/run.py.tmpl +239 -0
  73. crawlo/templates/project/settings.py.tmpl +248 -54
  74. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  75. crawlo/templates/spider/spider.py.tmpl +178 -32
  76. crawlo/utils/__init__.py +7 -7
  77. crawlo/utils/controlled_spider_mixin.py +336 -0
  78. crawlo/utils/date_tools.py +233 -233
  79. crawlo/utils/db_helper.py +343 -343
  80. crawlo/utils/func_tools.py +82 -82
  81. crawlo/utils/large_scale_config.py +287 -0
  82. crawlo/utils/large_scale_helper.py +344 -0
  83. crawlo/utils/log.py +128 -128
  84. crawlo/utils/queue_helper.py +176 -0
  85. crawlo/utils/request.py +267 -267
  86. crawlo/utils/request_serializer.py +220 -0
  87. crawlo/utils/spider_loader.py +62 -62
  88. crawlo/utils/system.py +11 -11
  89. crawlo/utils/tools.py +4 -4
  90. crawlo/utils/url.py +39 -39
  91. crawlo-1.1.2.dist-info/METADATA +567 -0
  92. crawlo-1.1.2.dist-info/RECORD +108 -0
  93. examples/__init__.py +7 -0
  94. tests/__init__.py +7 -7
  95. tests/test_final_validation.py +154 -0
  96. tests/test_proxy_health_check.py +32 -32
  97. tests/test_proxy_middleware_integration.py +136 -136
  98. tests/test_proxy_providers.py +56 -56
  99. tests/test_proxy_stats.py +19 -19
  100. tests/test_proxy_strategies.py +59 -59
  101. tests/test_redis_config.py +29 -0
  102. tests/test_redis_queue.py +225 -0
  103. tests/test_request_serialization.py +71 -0
  104. tests/test_scheduler.py +242 -0
  105. crawlo/pipelines/mysql_batch_pipline.py +0 -273
  106. crawlo/utils/concurrency_manager.py +0 -125
  107. crawlo/utils/pqueue.py +0 -174
  108. crawlo/utils/project.py +0 -197
  109. crawlo-1.1.0.dist-info/METADATA +0 -49
  110. crawlo-1.1.0.dist-info/RECORD +0 -97
  111. examples/gxb/items.py +0 -36
  112. examples/gxb/run.py +0 -16
  113. examples/gxb/settings.py +0 -72
  114. examples/gxb/spider/__init__.py +0 -2
  115. examples/gxb/spider/miit_spider.py +0 -180
  116. examples/gxb/spider/telecom_device.py +0 -129
  117. {examples/gxb → crawlo/queue}/__init__.py +0 -0
  118. {crawlo-1.1.0.dist-info → crawlo-1.1.2.dist-info}/WHEEL +0 -0
  119. {crawlo-1.1.0.dist-info → crawlo-1.1.2.dist-info}/entry_points.txt +0 -0
  120. {crawlo-1.1.0.dist-info → crawlo-1.1.2.dist-info}/top_level.txt +0 -0
crawlo/downloader/__init__.py
@@ -1,78 +1,242 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- from abc import abstractmethod, ABCMeta
- from typing_extensions import Self
- from typing import Final, Set, Optional
- from contextlib import asynccontextmanager
-
- from crawlo import Response, Request
- from crawlo.utils.log import get_logger
- from crawlo.middleware.middleware_manager import MiddlewareManager
-
-
- class ActivateRequestManager:
-
-     def __init__(self):
-         self._active: Final[Set] = set()
-
-     def add(self, request):
-         self._active.add(request)
-
-     def remove(self, request):
-         self._active.remove(request)
-
-     @asynccontextmanager
-     async def __call__(self, request):
-         try:
-             yield self.add(request)
-         finally:
-             self.remove(request)
-
-     def __len__(self):
-         return len(self._active)
-
-
- class DownloaderMeta(ABCMeta):
-     def __subclasscheck__(self, subclass):
-         required_methods = ('fetch', 'download', 'create_instance', 'close')
-         is_subclass = all(
-             hasattr(subclass, method) and callable(getattr(subclass, method, None)) for method in required_methods
-         )
-         return is_subclass
-
-
- class DownloaderBase(metaclass=DownloaderMeta):
-     def __init__(self, crawler):
-         self.crawler = crawler
-         self._active = ActivateRequestManager()
-         self.middleware: Optional[MiddlewareManager] = None
-         self.logger = get_logger(self.__class__.__name__, crawler.settings.get("LOG_LEVEL"))
-
-     @classmethod
-     def create_instance(cls, *args, **kwargs) -> Self:
-         return cls(*args, **kwargs)
-
-     def open(self) -> None:
-         self.logger.info(
-             f"{self.crawler.spider} <downloader class:{type(self).__name__}>"
-             f"<concurrency:{self.crawler.settings.get_int('CONCURRENCY')}>"
-         )
-         self.middleware = MiddlewareManager.create_instance(self.crawler)
-
-     async def fetch(self, request) -> Optional[Response]:
-         async with self._active(request):
-             response = await self.middleware.download(request)
-             return response
-
-     @abstractmethod
-     async def download(self, request: Request) -> Response:
-         pass
-
-     async def close(self) -> None:
-         pass
-
-     def idle(self) -> bool:
-         return len(self) == 0
-
-     def __len__(self) -> int:
-         return len(self._active)
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ """
+ Crawlo Downloader Module
+ ========================
+ Provides several high-performance asynchronous downloader implementations.
+
+ Downloader types:
+ - AioHttpDownloader: high-performance downloader built on aiohttp
+ - CurlCffiDownloader: curl-cffi downloader with browser fingerprint emulation
+ - HttpXDownloader: httpx downloader with HTTP/2 support
+
+ Core classes:
+ - DownloaderBase: downloader base class
+ - ActivateRequestManager: active-request manager
+ """
+ from abc import abstractmethod, ABCMeta
+ from typing import Final, Set, Optional
+ from contextlib import asynccontextmanager
+
+ from crawlo.utils.log import get_logger
+ from crawlo.middleware.middleware_manager import MiddlewareManager
+
+
+ class ActivateRequestManager:
+     """Active-request manager: tracks and manages requests that are in flight."""
+
+     def __init__(self):
+         self._active: Final[Set] = set()
+         self._total_requests: int = 0
+         self._completed_requests: int = 0
+         self._failed_requests: int = 0
+
+     def add(self, request):
+         """Add an active request."""
+         self._active.add(request)
+         self._total_requests += 1
+         return request
+
+     def remove(self, request, success: bool = True):
+         """Remove an active request and update the statistics."""
+         self._active.discard(request)  # discard avoids a KeyError
+         if success:
+             self._completed_requests += 1
+         else:
+             self._failed_requests += 1
+
+     @asynccontextmanager
+     async def __call__(self, request):
+         """Context-manager usage."""
+         self.add(request)
+         success = False
+         try:
+             yield request
+             success = True
+         except Exception:
+             success = False
+             raise
+         finally:
+             self.remove(request, success)
+
+     def __len__(self):
+         """Return the current number of active requests."""
+         return len(self._active)
+
+     def get_stats(self) -> dict:
+         """Return request statistics."""
+         return {
+             'active_requests': len(self._active),
+             'total_requests': self._total_requests,
+             'completed_requests': self._completed_requests,
+             'failed_requests': self._failed_requests,
+             'success_rate': self._completed_requests / max(1, self._total_requests - len(self._active))
+         }
+
+     def reset_stats(self):
+         """Reset the statistics."""
+         self._total_requests = 0
+         self._completed_requests = 0
+         self._failed_requests = 0
+         # Note: _active is not cleared, because requests may still be in flight
+
+
+ class DownloaderMeta(ABCMeta):
+     def __subclasscheck__(self, subclass):
+         required_methods = ('fetch', 'download', 'create_instance', 'close')
+         is_subclass = all(
+             hasattr(subclass, method) and callable(getattr(subclass, method, None)) for method in required_methods
+         )
+         return is_subclass
+
+
+ class DownloaderBase(metaclass=DownloaderMeta):
+     """
+     Downloader base class: provides the common downloader functionality and interface.
+
+     All downloader implementations should inherit from this class.
+     """
+
+     def __init__(self, crawler):
+         self.crawler = crawler
+         self._active = ActivateRequestManager()
+         self.middleware: Optional[MiddlewareManager] = None
+         self.logger = get_logger(self.__class__.__name__, crawler.settings.get("LOG_LEVEL"))
+         self._closed = False
+         self._stats_enabled = crawler.settings.get_bool("DOWNLOADER_STATS", True)
+
+     @classmethod
+     def create_instance(cls, *args, **kwargs):
+         """Create a downloader instance."""
+         return cls(*args, **kwargs)
+
+     def open(self) -> None:
+         """Initialize the downloader."""
+         if self._closed:
+             raise RuntimeError(f"{self.__class__.__name__} is closed and cannot be reopened")
+
+         self.logger.info(
+             f"{self.crawler.spider} <downloader class:{type(self).__name__}> "
+             f"<concurrency:{self.crawler.settings.get_int('CONCURRENCY')}>"
+         )
+
+         try:
+             self.middleware = MiddlewareManager.create_instance(self.crawler)
+             self.logger.debug(f"{self.__class__.__name__} middleware initialized")
+         except Exception as e:
+             self.logger.error(f"Middleware initialization failed: {e}")
+             raise
+
+     async def fetch(self, request) -> Optional['Response']:
+         """Fetch the response for a request (processed through the middleware chain)."""
+         if self._closed:
+             raise RuntimeError(f"{self.__class__.__name__} is closed")
+
+         if not self.middleware:
+             raise RuntimeError("Middleware not initialized")
+
+         async with self._active(request):
+             try:
+                 response = await self.middleware.download(request)
+                 return response
+             except Exception as e:
+                 self.logger.error(f"Failed to download request {request.url}: {e}")
+                 raise
+
+     @abstractmethod
+     async def download(self, request) -> 'Response':
+         """Download method that subclasses must implement."""
+         pass
+
+     async def close(self) -> None:
+         """Close the downloader and clean up resources."""
+         if not self._closed:
+             self._closed = True
+             if self._stats_enabled:
+                 stats = self.get_stats()
+                 self.logger.info(f"{self.__class__.__name__} stats: {stats}")
+             self.logger.debug(f"{self.__class__.__name__} closed")
+
+     def idle(self) -> bool:
+         """Check whether the downloader is idle (no active requests)."""
+         return len(self._active) == 0
+
+     def __len__(self) -> int:
+         """Return the number of active requests."""
+         return len(self._active)
+
+     def get_stats(self) -> dict:
+         """Return downloader statistics."""
+         base_stats = {
+             'downloader_class': self.__class__.__name__,
+             'is_idle': self.idle(),
+             'is_closed': self._closed
+         }
+
+         if self._stats_enabled:
+             base_stats.update(self._active.get_stats())
+
+         return base_stats
+
+     def reset_stats(self):
+         """Reset the statistics."""
+         if self._stats_enabled:
+             self._active.reset_stats()
+
+     def health_check(self) -> dict:
+         """Health check."""
+         return {
+             'status': 'healthy' if not self._closed and self.middleware else 'unhealthy',
+             'active_requests': len(self._active),
+             'middleware_ready': self.middleware is not None,
+             'closed': self._closed
+         }
+
+
+ # Import the concrete downloader implementations
+ try:
+     from .aiohttp_downloader import AioHttpDownloader
+ except ImportError:
+     AioHttpDownloader = None
+
+ try:
+     from .cffi_downloader import CurlCffiDownloader
+ except ImportError:
+     CurlCffiDownloader = None
+
+ try:
+     from .httpx_downloader import HttpXDownloader
+ except ImportError:
+     HttpXDownloader = None
+
+ # Export all available classes
+ __all__ = [
+     'DownloaderBase',
+     'DownloaderMeta',
+     'ActivateRequestManager',
+ ]
+
+ # Register the downloaders that are actually available
+ if AioHttpDownloader:
+     __all__.append('AioHttpDownloader')
+ if CurlCffiDownloader:
+     __all__.append('CurlCffiDownloader')
+ if HttpXDownloader:
+     __all__.append('HttpXDownloader')
+
+ # Convenience mapping of downloader names to classes
+ DOWNLOADER_MAP = {
+     'aiohttp': AioHttpDownloader,
+     'httpx': HttpXDownloader,
+     'curl_cffi': CurlCffiDownloader,
+     'cffi': CurlCffiDownloader,  # alias
+ }
+
+ # Filter out unavailable downloaders
+ DOWNLOADER_MAP = {k: v for k, v in DOWNLOADER_MAP.items() if v is not None}
+
+ def get_downloader_class(name: str):
+     """Look up a downloader class by name."""
+     if name in DOWNLOADER_MAP:
+         return DOWNLOADER_MAP[name]
+     raise ValueError(f"Unknown downloader type: {name}. Available types: {list(DOWNLOADER_MAP.keys())}")
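
The net effect of the rewrite is that crawlo/downloader/__init__.py now doubles as a small registry: the concrete downloaders are imported defensively, and DOWNLOADER_MAP / get_downloader_class resolve a name to whichever classes actually imported. Below is a minimal sketch of how that surface might be exercised, assuming crawlo 1.1.2 (with aiohttp available) is installed; the stub crawler/settings objects and the EchoDownloader subclass are hypothetical stand-ins for objects the framework normally supplies, and open() is skipped because it would build the real middleware stack.

import asyncio

from crawlo.downloader import DOWNLOADER_MAP, DownloaderBase, get_downloader_class


class _StubSettings:
    """Hypothetical stand-in for crawler.settings; implements only the accessors DownloaderBase touches."""
    _data = {"LOG_LEVEL": "INFO", "CONCURRENCY": 8, "DOWNLOADER_STATS": True}

    def get(self, key, default=None):
        return self._data.get(key, default)

    def get_bool(self, key, default=False):
        return bool(self._data.get(key, default))

    def get_int(self, key, default=0):
        return int(self._data.get(key, default))


class _StubCrawler:
    """Hypothetical stand-in for the crawler object the framework constructs."""
    settings = _StubSettings()
    spider = "<demo spider>"


class EchoDownloader(DownloaderBase):
    """Toy subclass: DownloaderBase only obliges subclasses to provide download()."""

    async def download(self, request):
        return None  # a real implementation performs I/O and returns a Response


async def main():
    print(sorted(DOWNLOADER_MAP))           # names that resolved at import time
    print(get_downloader_class("aiohttp"))  # raises ValueError for unknown names

    dl = EchoDownloader.create_instance(_StubCrawler())
    print(dl.health_check())  # 'unhealthy' until open() wires up the middleware
    await dl.close()          # logs the stats dict because DOWNLOADER_STATS is on
    print(dl.get_stats())     # is_closed=True plus the request counters


asyncio.run(main())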