crawlo-1.1.3-py3-none-any.whl → crawlo-1.1.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (118)
  1. crawlo/__init__.py +34 -34
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +40 -40
  4. crawlo/commands/__init__.py +13 -13
  5. crawlo/commands/check.py +594 -594
  6. crawlo/commands/genspider.py +151 -151
  7. crawlo/commands/list.py +155 -155
  8. crawlo/commands/run.py +285 -285
  9. crawlo/commands/startproject.py +196 -196
  10. crawlo/commands/stats.py +188 -188
  11. crawlo/commands/utils.py +186 -186
  12. crawlo/config.py +279 -279
  13. crawlo/core/__init__.py +2 -2
  14. crawlo/core/engine.py +171 -171
  15. crawlo/core/enhanced_engine.py +189 -189
  16. crawlo/core/processor.py +40 -40
  17. crawlo/core/scheduler.py +165 -165
  18. crawlo/crawler.py +1027 -1027
  19. crawlo/downloader/__init__.py +242 -242
  20. crawlo/downloader/aiohttp_downloader.py +212 -212
  21. crawlo/downloader/cffi_downloader.py +251 -251
  22. crawlo/downloader/httpx_downloader.py +259 -259
  23. crawlo/event.py +11 -11
  24. crawlo/exceptions.py +81 -81
  25. crawlo/extension/__init__.py +38 -31
  26. crawlo/extension/health_check.py +142 -0
  27. crawlo/extension/log_interval.py +58 -49
  28. crawlo/extension/log_stats.py +82 -44
  29. crawlo/extension/logging_extension.py +44 -35
  30. crawlo/extension/memory_monitor.py +89 -0
  31. crawlo/extension/performance_profiler.py +118 -0
  32. crawlo/extension/request_recorder.py +108 -0
  33. crawlo/filters/__init__.py +154 -154
  34. crawlo/filters/aioredis_filter.py +241 -241
  35. crawlo/filters/memory_filter.py +269 -269
  36. crawlo/items/__init__.py +23 -23
  37. crawlo/items/base.py +21 -21
  38. crawlo/items/fields.py +53 -53
  39. crawlo/items/items.py +104 -104
  40. crawlo/middleware/__init__.py +21 -21
  41. crawlo/middleware/default_header.py +32 -32
  42. crawlo/middleware/download_delay.py +28 -28
  43. crawlo/middleware/middleware_manager.py +135 -135
  44. crawlo/middleware/proxy.py +248 -248
  45. crawlo/middleware/request_ignore.py +30 -30
  46. crawlo/middleware/response_code.py +18 -18
  47. crawlo/middleware/response_filter.py +26 -26
  48. crawlo/middleware/retry.py +124 -124
  49. crawlo/mode_manager.py +200 -200
  50. crawlo/network/__init__.py +21 -21
  51. crawlo/network/request.py +311 -311
  52. crawlo/network/response.py +271 -271
  53. crawlo/pipelines/__init__.py +21 -21
  54. crawlo/pipelines/bloom_dedup_pipeline.py +156 -156
  55. crawlo/pipelines/console_pipeline.py +39 -39
  56. crawlo/pipelines/csv_pipeline.py +316 -316
  57. crawlo/pipelines/database_dedup_pipeline.py +224 -224
  58. crawlo/pipelines/json_pipeline.py +218 -218
  59. crawlo/pipelines/memory_dedup_pipeline.py +115 -115
  60. crawlo/pipelines/mongo_pipeline.py +132 -117
  61. crawlo/pipelines/mysql_pipeline.py +317 -195
  62. crawlo/pipelines/pipeline_manager.py +56 -56
  63. crawlo/pipelines/redis_dedup_pipeline.py +162 -162
  64. crawlo/project.py +153 -153
  65. crawlo/queue/pqueue.py +37 -37
  66. crawlo/queue/queue_manager.py +307 -307
  67. crawlo/queue/redis_priority_queue.py +208 -208
  68. crawlo/settings/__init__.py +7 -7
  69. crawlo/settings/default_settings.py +278 -244
  70. crawlo/settings/setting_manager.py +99 -99
  71. crawlo/spider/__init__.py +639 -639
  72. crawlo/stats_collector.py +59 -59
  73. crawlo/subscriber.py +131 -106
  74. crawlo/task_manager.py +30 -30
  75. crawlo/templates/crawlo.cfg.tmpl +10 -10
  76. crawlo/templates/project/__init__.py.tmpl +3 -3
  77. crawlo/templates/project/items.py.tmpl +17 -17
  78. crawlo/templates/project/middlewares.py.tmpl +111 -87
  79. crawlo/templates/project/pipelines.py.tmpl +97 -341
  80. crawlo/templates/project/run.py.tmpl +251 -251
  81. crawlo/templates/project/settings.py.tmpl +279 -250
  82. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  83. crawlo/templates/spider/spider.py.tmpl +142 -178
  84. crawlo/utils/__init__.py +7 -7
  85. crawlo/utils/controlled_spider_mixin.py +439 -439
  86. crawlo/utils/date_tools.py +233 -233
  87. crawlo/utils/db_helper.py +343 -343
  88. crawlo/utils/func_tools.py +82 -82
  89. crawlo/utils/large_scale_config.py +286 -286
  90. crawlo/utils/large_scale_helper.py +343 -343
  91. crawlo/utils/log.py +128 -128
  92. crawlo/utils/queue_helper.py +175 -175
  93. crawlo/utils/request.py +267 -267
  94. crawlo/utils/request_serializer.py +219 -219
  95. crawlo/utils/spider_loader.py +62 -62
  96. crawlo/utils/system.py +11 -11
  97. crawlo/utils/tools.py +4 -4
  98. crawlo/utils/url.py +39 -39
  99. crawlo-1.1.4.dist-info/METADATA +403 -0
  100. crawlo-1.1.4.dist-info/RECORD +117 -0
  101. examples/__init__.py +7 -7
  102. examples/controlled_spider_example.py +205 -205
  103. tests/__init__.py +7 -7
  104. tests/test_final_validation.py +153 -153
  105. tests/test_proxy_health_check.py +32 -32
  106. tests/test_proxy_middleware_integration.py +136 -136
  107. tests/test_proxy_providers.py +56 -56
  108. tests/test_proxy_stats.py +19 -19
  109. tests/test_proxy_strategies.py +59 -59
  110. tests/test_redis_config.py +28 -28
  111. tests/test_redis_queue.py +224 -224
  112. tests/test_request_serialization.py +70 -70
  113. tests/test_scheduler.py +241 -241
  114. crawlo-1.1.3.dist-info/METADATA +0 -635
  115. crawlo-1.1.3.dist-info/RECORD +0 -113
  116. {crawlo-1.1.3.dist-info → crawlo-1.1.4.dist-info}/WHEEL +0 -0
  117. {crawlo-1.1.3.dist-info → crawlo-1.1.4.dist-info}/entry_points.txt +0 -0
  118. {crawlo-1.1.3.dist-info → crawlo-1.1.4.dist-info}/top_level.txt +0 -0
@@ -1,213 +1,213 @@
+ #!/usr/bin/python
+ # -*- coding: UTF-8 -*-
+ import time
+ 
+ from yarl import URL
+ from typing import Optional
+ from aiohttp import (
+     ClientSession,
+     TCPConnector,
+     ClientTimeout,
+     TraceConfig,
+     ClientResponse,
+     ClientError,
+     BasicAuth,
+ )
+ 
+ from crawlo.network.response import Response
+ from crawlo.utils.log import get_logger
+ from crawlo.downloader import DownloaderBase
+ 
+ 
+ class AioHttpDownloader(DownloaderBase):
+     """
+     High-performance asynchronous downloader:
+     - built on a persistent ClientSession
+     - detects the Request's high-level semantics (json_body/form_data)
+     - supports GET/POST/PUT/DELETE and other HTTP methods
+     - honors proxies set by middleware (HTTP/HTTPS)
+     - guards against excessive memory use
+     """
+ 
+     def __init__(self, crawler):
+         super().__init__(crawler)
+         self.session: Optional[ClientSession] = None
+         self.max_download_size: int = 0
+         self.logger = get_logger(self.__class__.__name__, crawler.settings.get("LOG_LEVEL"))
+ 
+     def open(self):
+         super().open()
+         self.logger.info("Opening AioHttpDownloader")
+ 
+         # Read configuration
+         timeout_secs = self.crawler.settings.get_int("DOWNLOAD_TIMEOUT", 30)
+         verify_ssl = self.crawler.settings.get_bool("VERIFY_SSL", True)
+         pool_limit = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT", 100)
+         pool_per_host = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT_PER_HOST", 20)
+         self.max_download_size = self.crawler.settings.get_int("DOWNLOAD_MAXSIZE", 10 * 1024 * 1024)  # 10MB
+ 
+         # Create the connector
+         connector = TCPConnector(
+             verify_ssl=verify_ssl,
+             limit=pool_limit,
+             limit_per_host=pool_per_host,
+             ttl_dns_cache=300,
+             keepalive_timeout=15,
+             force_close=False,
+         )
+ 
+         # Timeout control
+         timeout = ClientTimeout(total=timeout_secs)
+ 
+         # Request tracing
+         trace_config = TraceConfig()
+         trace_config.on_request_start.append(self._on_request_start)
+         trace_config.on_request_end.append(self._on_request_end)
+         trace_config.on_request_exception.append(self._on_request_exception)
+ 
+         # Create the shared session
+         self.session = ClientSession(
+             connector=connector,
+             timeout=timeout,
+             trace_configs=[trace_config],
+             auto_decompress=True,
+         )
+ 
+         self.logger.debug("AioHttpDownloader initialized.")
+ 
+     async def download(self, request) -> Optional[Response]:
+         """Download the request and return a Response."""
+         if not self.session or self.session.closed:
+             raise RuntimeError("AioHttpDownloader session is not open.")
+ 
+         start_time = None
+         if self.crawler.settings.get_bool("DOWNLOAD_STATS", True):
+             start_time = time.time()
+ 
+         try:
+             # Generic send path (supports all HTTP methods)
+             async with await self._send_request(self.session, request) as resp:
+                 # Safety check: keep oversized bodies from causing OOM
+                 content_length = resp.headers.get("Content-Length")
+                 if content_length and int(content_length) > self.max_download_size:
+                     raise OverflowError(f"Response too large: {content_length} > {self.max_download_size}")
+ 
+                 body = await resp.read()
+                 response = self._structure_response(request, resp, body)
+ 
+                 # Record download statistics
+                 if start_time:
+                     download_time = time.time() - start_time
+                     self.logger.debug(f"Downloaded {request.url} in {download_time:.3f}s, size: {len(body)} bytes")
+ 
+                 return response
+ 
+         except ClientError as e:
+             self.logger.error(f"Client error for {request.url}: {e}")
+             raise
+         except Exception as e:
+             self.logger.critical(f"Unexpected error for {request.url}: {e}", exc_info=True)
+             raise
+ 
+     @staticmethod
+     async def _send_request(session: ClientSession, request) -> ClientResponse:
+         """
+         Send the request according to its HTTP method and high-level semantics.
+         Supports a proxy set by middleware, in either of these formats:
+         - str: "http://user:pass@host:port"
+         - dict: {"http": "...", "https": "..."} (the https or http entry is picked automatically)
+         """
+         method = request.method.lower()
+         if not hasattr(session, method):
+             raise ValueError(f"Unsupported HTTP method: {request.method}")
+ 
+         method_func = getattr(session, method)
+ 
+         # Build the keyword arguments
+         kwargs = {
+             "headers": request.headers,
+             "cookies": request.cookies,
+             "allow_redirects": request.allow_redirects,
+         }
+ 
+         # === Proxy handling ===
+         proxy = getattr(request, "proxy", None)
+         proxy_auth = None
+ 
+         if proxy:
+             # Accept the dict format: {"http": "http://...", "https": "http://..."}
+             if isinstance(proxy, dict):
+                 # Prefer the https entry, fall back to http
+                 proxy = proxy.get("https") or proxy.get("http")
+ 
+             if not isinstance(proxy, (str, URL)):
+                 raise ValueError(f"proxy must be str or URL, got {type(proxy)}")
+ 
+             try:
+                 proxy_url = URL(proxy)
+                 if proxy_url.scheme not in ("http", "https"):
+                     raise ValueError(f"Unsupported proxy scheme: {proxy_url.scheme}, only HTTP/HTTPS supported.")
+ 
+                 # Extract credentials
+                 if proxy_url.user and proxy_url.password:
+                     proxy_auth = BasicAuth(proxy_url.user, proxy_url.password)
+                     # Strip the credentials from the URL
+                     proxy = str(proxy_url.with_user(None))
+                 else:
+                     proxy = str(proxy_url)
+ 
+                 kwargs["proxy"] = proxy
+                 if proxy_auth:
+                     kwargs["proxy_auth"] = proxy_auth
+ 
+             except Exception as e:
+                 raise ValueError(f"Invalid proxy URL: {proxy}") from e
+ 
+         # === Request body handling ===
+         if hasattr(request, "_json_body") and request._json_body is not None:
+             kwargs["json"] = request._json_body
+         elif isinstance(request.body, (dict, list)):
+             kwargs["json"] = request.body
+         elif request.body is not None:
+             kwargs["data"] = request.body
+ 
+         return await method_func(request.url, **kwargs)
+ 
+     @staticmethod
+     def _structure_response(request, resp: ClientResponse, body: bytes) -> Response:
+         """Build the Response object expected by the framework."""
+         return Response(
+             url=str(resp.url),
+             headers=dict(resp.headers),
+             status_code=resp.status,
+             body=body,
+             request=request,
+         )
+ 
+     # --- Request tracing callbacks ---
+     async def _on_request_start(self, session, trace_config_ctx, params):
+         """Callback invoked when a request starts."""
+         proxy = getattr(params, "proxy", None)
+         proxy_info = f" via {proxy}" if proxy else ""
+         self.logger.debug(f"Requesting: {params.method} {params.url}{proxy_info}")
+ 
+     async def _on_request_end(self, session, trace_config_ctx, params):
+         """Callback invoked when a request completes successfully."""
+         response = params.response
+         self.logger.debug(
+             f"Finished: {params.method} {params.url} with status {response.status}"
+         )
+ 
+     async def _on_request_exception(self, session, trace_config_ctx, params):
+         """Callback invoked when a request raises an exception."""
+         exc = params.exception
+         self.logger.warning(
+             f"Failed: {params.method} {params.url} with exception {type(exc).__name__}: {exc}"
+         )
+ 
+     async def close(self) -> None:
+         """Release the session resources."""
+         if self.session and not self.session.closed:
+             self.logger.info("Closing AioHttpDownloader session...")
+             await self.session.close()
          self.logger.debug("AioHttpDownloader closed.")