crawlo 1.1.0__py3-none-any.whl → 1.1.2__py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (120)
  1. crawlo/__init__.py +34 -24
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +40 -40
  4. crawlo/commands/__init__.py +13 -13
  5. crawlo/commands/check.py +594 -155
  6. crawlo/commands/genspider.py +152 -111
  7. crawlo/commands/list.py +156 -119
  8. crawlo/commands/run.py +285 -170
  9. crawlo/commands/startproject.py +196 -101
  10. crawlo/commands/stats.py +188 -167
  11. crawlo/commands/utils.py +187 -0
  12. crawlo/config.py +280 -0
  13. crawlo/core/__init__.py +2 -2
  14. crawlo/core/engine.py +171 -158
  15. crawlo/core/enhanced_engine.py +190 -0
  16. crawlo/core/processor.py +40 -40
  17. crawlo/core/scheduler.py +162 -57
  18. crawlo/crawler.py +1028 -493
  19. crawlo/downloader/__init__.py +242 -78
  20. crawlo/downloader/aiohttp_downloader.py +212 -199
  21. crawlo/downloader/cffi_downloader.py +252 -277
  22. crawlo/downloader/httpx_downloader.py +257 -246
  23. crawlo/event.py +11 -11
  24. crawlo/exceptions.py +78 -78
  25. crawlo/extension/__init__.py +31 -31
  26. crawlo/extension/log_interval.py +49 -49
  27. crawlo/extension/log_stats.py +44 -44
  28. crawlo/extension/logging_extension.py +34 -34
  29. crawlo/filters/__init__.py +154 -37
  30. crawlo/filters/aioredis_filter.py +242 -150
  31. crawlo/filters/memory_filter.py +269 -202
  32. crawlo/items/__init__.py +23 -23
  33. crawlo/items/base.py +21 -21
  34. crawlo/items/fields.py +53 -53
  35. crawlo/items/items.py +104 -104
  36. crawlo/middleware/__init__.py +21 -21
  37. crawlo/middleware/default_header.py +32 -32
  38. crawlo/middleware/download_delay.py +28 -28
  39. crawlo/middleware/middleware_manager.py +135 -135
  40. crawlo/middleware/proxy.py +248 -245
  41. crawlo/middleware/request_ignore.py +30 -30
  42. crawlo/middleware/response_code.py +18 -18
  43. crawlo/middleware/response_filter.py +26 -26
  44. crawlo/middleware/retry.py +125 -90
  45. crawlo/mode_manager.py +201 -0
  46. crawlo/network/__init__.py +21 -7
  47. crawlo/network/request.py +311 -203
  48. crawlo/network/response.py +269 -166
  49. crawlo/pipelines/__init__.py +13 -13
  50. crawlo/pipelines/console_pipeline.py +39 -39
  51. crawlo/pipelines/csv_pipeline.py +317 -0
  52. crawlo/pipelines/json_pipeline.py +219 -0
  53. crawlo/pipelines/mongo_pipeline.py +116 -116
  54. crawlo/pipelines/mysql_pipeline.py +195 -195
  55. crawlo/pipelines/pipeline_manager.py +56 -56
  56. crawlo/project.py +153 -0
  57. crawlo/queue/pqueue.py +37 -0
  58. crawlo/queue/queue_manager.py +304 -0
  59. crawlo/queue/redis_priority_queue.py +192 -0
  60. crawlo/settings/__init__.py +7 -7
  61. crawlo/settings/default_settings.py +226 -169
  62. crawlo/settings/setting_manager.py +99 -99
  63. crawlo/spider/__init__.py +639 -129
  64. crawlo/stats_collector.py +59 -59
  65. crawlo/subscriber.py +106 -106
  66. crawlo/task_manager.py +30 -27
  67. crawlo/templates/crawlo.cfg.tmpl +10 -10
  68. crawlo/templates/project/__init__.py.tmpl +3 -3
  69. crawlo/templates/project/items.py.tmpl +17 -17
  70. crawlo/templates/project/middlewares.py.tmpl +87 -76
  71. crawlo/templates/project/pipelines.py.tmpl +336 -64
  72. crawlo/templates/project/run.py.tmpl +239 -0
  73. crawlo/templates/project/settings.py.tmpl +248 -54
  74. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  75. crawlo/templates/spider/spider.py.tmpl +178 -32
  76. crawlo/utils/__init__.py +7 -7
  77. crawlo/utils/controlled_spider_mixin.py +336 -0
  78. crawlo/utils/date_tools.py +233 -233
  79. crawlo/utils/db_helper.py +343 -343
  80. crawlo/utils/func_tools.py +82 -82
  81. crawlo/utils/large_scale_config.py +287 -0
  82. crawlo/utils/large_scale_helper.py +344 -0
  83. crawlo/utils/log.py +128 -128
  84. crawlo/utils/queue_helper.py +176 -0
  85. crawlo/utils/request.py +267 -267
  86. crawlo/utils/request_serializer.py +220 -0
  87. crawlo/utils/spider_loader.py +62 -62
  88. crawlo/utils/system.py +11 -11
  89. crawlo/utils/tools.py +4 -4
  90. crawlo/utils/url.py +39 -39
  91. crawlo-1.1.2.dist-info/METADATA +567 -0
  92. crawlo-1.1.2.dist-info/RECORD +108 -0
  93. examples/__init__.py +7 -0
  94. tests/__init__.py +7 -7
  95. tests/test_final_validation.py +154 -0
  96. tests/test_proxy_health_check.py +32 -32
  97. tests/test_proxy_middleware_integration.py +136 -136
  98. tests/test_proxy_providers.py +56 -56
  99. tests/test_proxy_stats.py +19 -19
  100. tests/test_proxy_strategies.py +59 -59
  101. tests/test_redis_config.py +29 -0
  102. tests/test_redis_queue.py +225 -0
  103. tests/test_request_serialization.py +71 -0
  104. tests/test_scheduler.py +242 -0
  105. crawlo/pipelines/mysql_batch_pipline.py +0 -273
  106. crawlo/utils/concurrency_manager.py +0 -125
  107. crawlo/utils/pqueue.py +0 -174
  108. crawlo/utils/project.py +0 -197
  109. crawlo-1.1.0.dist-info/METADATA +0 -49
  110. crawlo-1.1.0.dist-info/RECORD +0 -97
  111. examples/gxb/items.py +0 -36
  112. examples/gxb/run.py +0 -16
  113. examples/gxb/settings.py +0 -72
  114. examples/gxb/spider/__init__.py +0 -2
  115. examples/gxb/spider/miit_spider.py +0 -180
  116. examples/gxb/spider/telecom_device.py +0 -129
  117. {examples/gxb → crawlo/queue}/__init__.py +0 -0
  118. {crawlo-1.1.0.dist-info → crawlo-1.1.2.dist-info}/WHEEL +0 -0
  119. {crawlo-1.1.0.dist-info → crawlo-1.1.2.dist-info}/entry_points.txt +0 -0
  120. {crawlo-1.1.0.dist-info → crawlo-1.1.2.dist-info}/top_level.txt +0 -0
crawlo/queue/redis_priority_queue.py (new file, +192 -0)
@@ -0,0 +1,192 @@
+ import pickle
+ import time
+ import asyncio
+ from typing import Optional
+ import redis.asyncio as aioredis
+
+ from crawlo import Request
+ from crawlo.utils.log import get_logger
+ from crawlo.utils.request_serializer import RequestSerializer
+
+
+ logger = get_logger(__name__)
+
+
+ class RedisPriorityQueue:
+     """
+     Redis-based distributed async priority queue (production-hardened version).
+     """
+
+     def __init__(
+         self,
+         redis_url: str = "redis://localhost:6379/0",
+         queue_name: str = "crawlo:requests",
+         processing_queue: str = "crawlo:processing",
+         failed_queue: str = "crawlo:failed",
+         max_retries: int = 3,
+         timeout: int = 300,  # task processing timeout (seconds)
+         max_connections: int = 10,  # connection pool size
+     ):
+         self.redis_url = redis_url
+         self.queue_name = queue_name
+         self.processing_queue = processing_queue
+         self.failed_queue = failed_queue
+         self.max_retries = max_retries
+         self.timeout = timeout
+         self.max_connections = max_connections
+         self._redis = None
+         self._lock = asyncio.Lock()  # lock guarding connection initialization
+         self.request_serializer = RequestSerializer()  # handles serialization
+
+     async def connect(self, max_retries=3, delay=1):
+         """Connect to Redis asynchronously, with retries."""
+         async with self._lock:
+             if self._redis is not None:
+                 return self._redis
+
+             for attempt in range(max_retries):
+                 try:
+                     self._redis = await aioredis.from_url(
+                         self.redis_url,
+                         decode_responses=False,  # pickle needs bytes
+                         max_connections=self.max_connections,
+                         socket_connect_timeout=5,
+                         socket_timeout=30,
+                     )
+                     # probe the connection
+                     await self._redis.ping()
+                     logger.info("✅ Redis connection established")
+                     return self._redis
+                 except Exception as e:
+                     logger.warning(f"⚠️ Redis connection failed (attempt {attempt + 1}/{max_retries}): {e}")
+                     if attempt < max_retries - 1:
+                         await asyncio.sleep(delay)
+                     else:
+                         raise ConnectionError(f"❌ Unable to connect to Redis: {e}")
+
+     async def _ensure_connection(self):
+         """Ensure the connection is alive."""
+         if self._redis is None:
+             await self.connect()
+         try:
+             await self._redis.ping()
+         except Exception:
+             logger.warning("🔄 Redis connection lost, reconnecting...")
+             self._redis = None
+             await self.connect()
+
+     async def put(self, request: Request, priority: int = 0) -> bool:
+         """Push a request onto the queue."""
+         await self._ensure_connection()
+         score = -priority
+         key = self._get_request_key(request)
+         try:
+             # 🔥 clean the Request with the dedicated serialization helper
+             clean_request = self.request_serializer.prepare_for_serialization(request)
+
+             serialized = pickle.dumps(clean_request)
+             pipe = self._redis.pipeline()
+             pipe.zadd(self.queue_name, {key: score})
+             pipe.hset(f"{self.queue_name}:data", key, serialized)
+             result = await pipe.execute()
+
+             if result[0] > 0:
+                 logger.debug(f"✅ enqueued: {request.url}")
+             return result[0] > 0
+         except Exception as e:
+             logger.error(f"❌ failed to enqueue: {e}")
+             return False
+
+     async def get(self, timeout: float = 5.0) -> Optional[Request]:
+         """
+         Pop a request (with timeout).
+         :param timeout: maximum wait time (seconds), avoids endless polling
+         """
+         await self._ensure_connection()
+         start_time = asyncio.get_event_loop().time()
+
+         while True:
+             try:
+                 # try to fetch a task
+                 result = await self._redis.zpopmin(self.queue_name, count=1)
+                 if result:
+                     key, score = result[0]
+                     serialized = await self._redis.hget(f"{self.queue_name}:data", key)
+                     if not serialized:
+                         continue
+
+                     # move it to the processing queue
+                     processing_key = f"{key}:{int(time.time())}"
+                     pipe = self._redis.pipeline()
+                     pipe.zadd(self.processing_queue, {processing_key: time.time() + self.timeout})
+                     pipe.hset(f"{self.processing_queue}:data", processing_key, serialized)
+                     pipe.hdel(f"{self.queue_name}:data", key)
+                     await pipe.execute()
+
+                     return pickle.loads(serialized)
+
+                 # check for timeout
+                 if asyncio.get_event_loop().time() - start_time > timeout:
+                     return None
+
+                 # brief pause to avoid busy polling
+                 await asyncio.sleep(0.1)
+
+             except Exception as e:
+                 logger.error(f"❌ failed to fetch from queue: {e}")
+                 return None
+
+     async def ack(self, request: Request):
+         """Acknowledge a task as completed."""
+         await self._ensure_connection()
+         key = self._get_request_key(request)
+         cursor = 0
+         while True:
+             cursor, keys = await self._redis.zscan(self.processing_queue, cursor, match=f"{key}:*")
+             if keys:
+                 pipe = self._redis.pipeline()
+                 for k in keys:
+                     pipe.zrem(self.processing_queue, k)
+                     pipe.hdel(f"{self.processing_queue}:data", k)
+                 await pipe.execute()
+             if cursor == 0:
+                 break
+
+     async def fail(self, request: Request, reason: str = ""):
+         """Mark a task as failed."""
+         await self._ensure_connection()
+         key = self._get_request_key(request)
+         await self.ack(request)
+
+         retry_key = f"{self.failed_queue}:retries:{key}"
+         retries = await self._redis.incr(retry_key)
+         await self._redis.expire(retry_key, 86400)
+
+         if retries <= self.max_retries:
+             await self.put(request, priority=request.priority + 1)
+             logger.info(f"🔁 task retried [{retries}/{self.max_retries}]: {request.url}")
+         else:
+             failed_data = {
+                 "url": request.url,
+                 "reason": reason,
+                 "retries": retries,
+                 "failed_at": time.time(),
+                 "request_pickle": pickle.dumps(request).hex(),  # optional: keep the full request
+             }
+             await self._redis.lpush(self.failed_queue, pickle.dumps(failed_data))
+             logger.error(f"❌ task failed permanently [{retries} attempts]: {request.url}")
+
+     def _get_request_key(self, request: Request) -> str:
+         """Build a unique key for a request."""
+         return f"url:{hash(request.url)}"
+
+     async def qsize(self) -> int:
+         """Return the queue size."""
+         await self._ensure_connection()
+         return await self._redis.zcard(self.queue_name)
+
+     async def close(self):
+         """Close the connection."""
+         if self._redis:
+             await self._redis.close()
+             self._redis = None
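
The file above implements a reliable-queue pattern: put() stores pickled requests in a sorted set scored by negated priority, get() atomically moves a popped entry into a processing queue with a visibility timeout, and ack()/fail() either remove it or re-queue it against a retry budget. For orientation, a minimal consumer loop might look like the sketch below. This is illustrative only, not code shipped in the wheel: the seed URL and the download step are placeholders, and Request(url=...) is assumed from the crawlo export seen in the imports above.

    import asyncio

    from crawlo import Request
    from crawlo.queue.redis_priority_queue import RedisPriorityQueue

    async def main():
        queue = RedisPriorityQueue(redis_url="redis://localhost:6379/0")
        await queue.connect()

        # Seed one request; put() negates the priority, so larger values pop first.
        await queue.put(Request(url="https://example.com"), priority=10)

        # Drain the queue; get() returns None once it has been empty for `timeout` seconds.
        while (request := await queue.get(timeout=5.0)) is not None:
            try:
                ...  # download and parse `request` here (placeholder)
                await queue.ack(request)  # drop it from the processing queue
            except Exception as exc:
                await queue.fail(request, reason=str(exc))  # retry or park in the failed queue

        await queue.close()

    asyncio.run(main())
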
@@ -1,7 +1,7 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- """
- # @Time : 2025-05-11 11:08
- # @Author : oscar
- # @Desc : None
- """
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ """
+ # @Time : 2025-05-11 11:08
+ # @Author : oscar
+ # @Desc : None
+ """
crawlo/settings/default_settings.py (+226 -169)
@@ -1,169 +1,226 @@
- #!/usr/bin/python
- # -*- coding: UTF-8 -*-
- """
- ==================================
- Crawlo project configuration file
- ==================================
- Notes:
- - All settings are grouped by functional module.
- - Sensitive settings (Redis and MySQL passwords, etc.) can be overridden via environment variables.
- - Components (MySQL, Redis, Proxy, etc.) can be enabled/disabled as needed.
- """
-
- import os
-
- # ============================== Core info ==============================
- PROJECT_NAME = 'crawlo'
- VERSION = 1.0
-
- # ============================== Network request settings ==============================
-
- # Downloader selection (pick one of three)
- DOWNLOADER = "crawlo.downloader.aiohttp_downloader.AioHttpDownloader"
- # DOWNLOADER = "crawlo.downloader.cffi_downloader.CurlCffiDownloader"  # supports browser fingerprinting
- # DOWNLOADER = "crawlo.downloader.httpx_downloader.HttpXDownloader"
-
- # Request timeouts and security
- DOWNLOAD_TIMEOUT = 60  # download timeout (seconds)
- VERIFY_SSL = True  # whether to verify SSL certificates
- USE_SESSION = True  # whether to use a persistent session (aiohttp-specific)
-
- # Request delay control
- DOWNLOAD_DELAY = 0.5  # base delay (seconds)
- RANDOM_RANGE = (0.75, 1.25)  # random delay factor range (e.g. 0.75x to 1.25x)
- RANDOMNESS = True  # whether to randomize the delay
-
- # Retry policy
- MAX_RETRY_TIMES = 3  # maximum retry count
- RETRY_PRIORITY = -1  # priority adjustment for retried requests
- RETRY_HTTP_CODES = [408, 429, 500, 502, 503, 504, 522, 524]  # status codes that trigger a retry
- IGNORE_HTTP_CODES = [403, 404]  # status codes marked successful without retrying
- ALLOWED_CODES = []  # allowed status codes (empty means unrestricted)
-
- # Connection and response size limits
- CONNECTION_POOL_LIMIT = 100  # maximum concurrent connections (pool size)
- DOWNLOAD_MAXSIZE = 10 * 1024 * 1024  # maximum response body size (10MB)
- DOWNLOAD_WARN_SIZE = 1024 * 1024  # response body warning threshold (1MB)
- DOWNLOAD_RETRY_TIMES = MAX_RETRY_TIMES  # downloader-internal retry count (reuses global)
-
- # ============================== Concurrency and scheduling ==============================
-
- CONCURRENCY = 8  # concurrent requests per spider
- INTERVAL = 5  # stats logging interval (seconds)
- DEPTH_PRIORITY = 1  # depth-first strategy priority
- MAX_RUNNING_SPIDERS = 3  # maximum number of spiders running at once
-
- # ============================== Data storage settings ==============================
-
- # --- MySQL settings ---
- MYSQL_HOST = '127.0.0.1'
- MYSQL_PORT = 3306
- MYSQL_USER = 'root'
- MYSQL_PASSWORD = '123456'
- MYSQL_DB = 'crawl'
- MYSQL_TABLE = 'crawlo'
- MYSQL_BATCH_SIZE = 100  # batch insert size
-
- # MySQL connection pool
- MYSQL_FLUSH_INTERVAL = 5  # cache flush interval (seconds)
- MYSQL_POOL_MIN = 5
- MYSQL_POOL_MAX = 20
- MYSQL_ECHO = False  # whether to log SQL statements
-
- # --- MongoDB settings ---
- MONGO_URI = 'mongodb://user:password@host:27017'
- MONGO_DATABASE = 'scrapy_data'
- MONGO_COLLECTION = 'crawled_items'
- MONGO_MAX_POOL_SIZE = 200
- MONGO_MIN_POOL_SIZE = 20
-
- # ============================== Dedup filter settings ==============================
-
- # Request fingerprint storage directory (used by the file filter)
- REQUEST_DIR = '.'
-
- # Dedup filter class (pick one of two)
- FILTER_CLASS = 'crawlo.filters.memory_filter.MemoryFilter'
- # FILTER_CLASS = 'crawlo.filters.redis_filter.AioRedisFilter'  # distributed dedup
-
- # --- Redis filter settings ---
- REDIS_HOST = os.getenv('REDIS_HOST', '127.0.0.1')
- REDIS_PORT = int(os.getenv('REDIS_PORT', 6379))
- REDIS_PASSWORD = os.getenv('REDIS_PASSWORD', 'oscar&0503')
- REDIS_URL = f'redis://:{REDIS_PASSWORD or ""}@{REDIS_HOST}:{REDIS_PORT}/0'
- REDIS_KEY = 'request_fingerprint'  # key under which fingerprints are stored in Redis
- REDIS_TTL = 0  # fingerprint TTL (0 means never expire)
- CLEANUP_FP = 0  # whether to clear fingerprints on shutdown (0 = keep)
- FILTER_DEBUG = True  # whether to enable dedup debug logging
- DECODE_RESPONSES = True  # whether Redis replies are decoded to strings
-
- # ============================== Middleware settings ==============================
-
- MIDDLEWARES = [
-     # === Request preprocessing stage ===
-     'crawlo.middleware.request_ignore.RequestIgnoreMiddleware',  # 1. drop invalid requests
-     'crawlo.middleware.download_delay.DownloadDelayMiddleware',  # 2. throttle request rate
-     'crawlo.middleware.default_header.DefaultHeaderMiddleware',  # 3. add default headers
-     'crawlo.middleware.proxy.ProxyMiddleware',  # 4. set proxies
-
-     # === Response processing stage ===
-     'crawlo.middleware.retry.RetryMiddleware',  # 5. retry failed requests
-     'crawlo.middleware.response_code.ResponseCodeMiddleware',  # 6. handle special status codes
-     'crawlo.middleware.response_filter.ResponseFilterMiddleware',  # 7. filter response content
- ]
-
- # ============================== Extensions and pipelines ==============================
-
- # Item pipelines (enabled storage backends)
- PIPELINES = [
-     'crawlo.pipelines.console_pipeline.ConsolePipeline',  # console output
-     # 'crawlo.pipelines.mysql_pipeline.AsyncmyMySQLPipeline',  # MySQL storage (optional)
- ]
-
- # Extension components (monitoring and logging)
- EXTENSIONS = [
-     'crawlo.extension.log_interval.LogIntervalExtension',  # periodic logging
-     'crawlo.extension.log_stats.LogStats',  # statistics
-     'crawlo.extension.logging_extension.CustomLoggerExtension',  # custom logger
- ]
-
- # ============================== Logging and monitoring ==============================
-
- LOG_LEVEL = 'INFO'  # log level: DEBUG/INFO/WARNING/ERROR
- STATS_DUMP = True  # whether to dump stats periodically
- LOG_FILE = f'logs/{PROJECT_NAME}.log'  # log file path
- LOG_FORMAT = '%(asctime)s - [%(name)s] - %(levelname)s: %(message)s'
- LOG_ENCODING = 'utf-8'
-
- # ============================== Proxy settings ==============================
-
- PROXY_ENABLED = False  # whether to enable proxies
- PROXY_API_URL = "https://api.proxyprovider.com/get"  # proxy fetch endpoint (replace with a real one)
-
- # Proxy extraction (supports a field path or a function)
- PROXY_EXTRACTOR = "proxy"  # e.g. for responses like {"proxy": "http://1.1.1.1:8080"}
-
- # Proxy refresh control
- PROXY_REFRESH_INTERVAL = 60  # proxy refresh interval (seconds)
- PROXY_API_TIMEOUT = 10  # proxy API request timeout
-
- # ============================== Curl-Cffi specific settings ==============================
-
- # Browser fingerprint emulation (CurlCffi downloader only)
- CURL_BROWSER_TYPE = "chrome"  # options: chrome, edge, safari, firefox, or a version like chrome136
-
- # Custom browser version map (overrides the default behaviour)
- CURL_BROWSER_VERSION_MAP = {
-     "chrome": "chrome136",
-     "edge": "edge101",
-     "safari": "safari184",
-     "firefox": "firefox135",
-     # example: legacy version testing
-     # "chrome_legacy": "chrome110",
- }
-
- # Default request headers (can be overridden per Spider)
- DEFAULT_REQUEST_HEADERS = {
-     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
-                   '(KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
- }
+ #!/usr/bin/python
+ # -*- coding: UTF-8 -*-
+ """
+ ==================================
+ Crawlo project configuration file
+ ==================================
+ Notes:
+ - All settings are grouped by functional module.
+ - Sensitive settings (Redis and MySQL passwords, etc.) can be overridden via environment variables.
+ - Components (MySQL, Redis, Proxy, etc.) can be enabled/disabled as needed.
+ """
+ import os
+
+ # ============================== Core info ==============================
+ PROJECT_NAME = 'crawlo'
+
+ # ============================== Network request settings ==============================
+
+ # Downloader selection (three supported ways)
+ # Option 1: specify the class path directly
+ DOWNLOADER = "crawlo.downloader.aiohttp_downloader.AioHttpDownloader"
+ # DOWNLOADER = "crawlo.downloader.cffi_downloader.CurlCffiDownloader"  # supports browser fingerprinting
+ # DOWNLOADER = "crawlo.downloader.httpx_downloader.HttpXDownloader"  # supports HTTP/2
+
+ # Option 2: use a short name (recommended)
+ # DOWNLOADER_TYPE = 'aiohttp'  # options: aiohttp, httpx, curl_cffi, cffi
+
+ # Option 3: choose dynamically in the Spider
+ # set custom_settings = {'DOWNLOADER_TYPE': 'httpx'} on the Spider class
+
+ # Request timeouts and security
+ DOWNLOAD_TIMEOUT = 30  # download timeout (seconds)
+ VERIFY_SSL = True  # whether to verify SSL certificates
+ USE_SESSION = True  # whether to use a persistent session (aiohttp-specific)
+
+ # Request delay control
+ DOWNLOAD_DELAY = 1.0  # base delay (seconds)
+ RANDOM_RANGE = (0.8, 1.2)  # random delay factor range
+ RANDOMNESS = True  # whether to randomize the delay
+
+ # Retry policy
+ MAX_RETRY_TIMES = 3  # maximum retry count
+ RETRY_PRIORITY = -1  # priority adjustment for retried requests
+ RETRY_HTTP_CODES = [408, 429, 500, 502, 503, 504, 522, 524]  # status codes that trigger a retry
+ IGNORE_HTTP_CODES = [403, 404]  # status codes marked successful without retrying
+ ALLOWED_CODES = []  # allowed status codes (empty means unrestricted)
+
+ # Connection and response size limits
+ CONNECTION_POOL_LIMIT = 50  # maximum concurrent connections (pool size)
+ CONNECTION_POOL_LIMIT_PER_HOST = 20  # connection pool size per host
+ DOWNLOAD_MAXSIZE = 10 * 1024 * 1024  # maximum response body size (10MB)
+ DOWNLOAD_WARN_SIZE = 1024 * 1024  # response body warning threshold (1MB)
+ DOWNLOAD_RETRY_TIMES = MAX_RETRY_TIMES  # downloader-internal retry count (reuses global)
+
+ # Download statistics
+ DOWNLOADER_STATS = True  # whether to enable downloader statistics
+ DOWNLOAD_STATS = True  # whether to record download time and size statistics
+
+ # ============================== Concurrency and scheduling ==============================
+
+ CONCURRENCY = 8  # concurrent requests per spider
+ INTERVAL = 5  # stats logging interval (seconds)
+ DEPTH_PRIORITY = 1  # depth-first strategy priority
+ MAX_RUNNING_SPIDERS = 3  # maximum number of spiders running at once
+
+ # ============================== Queue settings ==============================
+
+ # 🎯 run mode: 'standalone', 'distributed', or 'auto' (auto-detect)
+ RUN_MODE = 'standalone'  # standalone by default; simplest to use
+
+ # queue type: 'memory', 'redis' (distributed), or 'auto' (auto-select)
+ QUEUE_TYPE = 'memory'  # in-memory queue by default; no external dependency
+ SCHEDULER_MAX_QUEUE_SIZE = 2000  # maximum scheduler queue capacity
+ SCHEDULER_QUEUE_NAME = 'crawlo:requests'  # Redis queue name
+ QUEUE_MAX_RETRIES = 3  # maximum retries for queue operations
+ QUEUE_TIMEOUT = 300  # queue operation timeout (seconds)
+
+ # Large-scale crawl tuning
+ LARGE_SCALE_BATCH_SIZE = 1000  # batch size
+ LARGE_SCALE_CHECKPOINT_INTERVAL = 5000  # progress checkpoint interval
+ LARGE_SCALE_MAX_MEMORY_USAGE = 500  # maximum memory usage (MB)
+
+ # ============================== Data storage settings ==============================
+
+ # --- MySQL settings ---
+ MYSQL_HOST = '127.0.0.1'
+ MYSQL_PORT = 3306
+ MYSQL_USER = 'root'
+ MYSQL_PASSWORD = '123456'
+ MYSQL_DB = 'crawl'
+ MYSQL_TABLE = 'crawlo'
+ MYSQL_BATCH_SIZE = 100  # batch insert size
+
+ # MySQL connection pool
+ MYSQL_FLUSH_INTERVAL = 5  # cache flush interval (seconds)
+ MYSQL_POOL_MIN = 5
+ MYSQL_POOL_MAX = 20
+ MYSQL_ECHO = False  # whether to log SQL statements
+
+ # --- MongoDB settings ---
+ MONGO_URI = 'mongodb://user:password@host:27017'
+ MONGO_DATABASE = 'scrapy_data'
+ MONGO_COLLECTION = 'crawled_items'
+ MONGO_MAX_POOL_SIZE = 200
+ MONGO_MIN_POOL_SIZE = 20
+
+ # ============================== Dedup filter settings ==============================
+
+ # Request fingerprint storage directory (used by the file filter)
+ REQUEST_DIR = '.'
+
+ # Dedup filter class (pick one of two)
+ FILTER_CLASS = 'crawlo.filters.memory_filter.MemoryFilter'
+ # FILTER_CLASS = 'crawlo.filters.aioredis_filter.AioRedisFilter'  # distributed dedup
+
+ # --- Redis filter settings ---
+ REDIS_HOST = os.getenv('REDIS_HOST', '127.0.0.1')
+ REDIS_PORT = int(os.getenv('REDIS_PORT', 6379))
+ REDIS_PASSWORD = os.getenv('REDIS_PASSWORD', '')  # no password by default
+ # 🔧 build the URL differently depending on whether a password is set
+ if REDIS_PASSWORD:
+     REDIS_URL = f'redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/0'
+ else:
+     REDIS_URL = f'redis://{REDIS_HOST}:{REDIS_PORT}/0'
+ REDIS_KEY = 'request_fingerprint'  # key under which fingerprints are stored in Redis
+ REDIS_TTL = 0  # fingerprint TTL (0 means never expire)
+ CLEANUP_FP = 0  # whether to clear fingerprints on shutdown (0 = keep)
+ FILTER_DEBUG = True  # whether to enable dedup debug logging
+ DECODE_RESPONSES = True  # whether Redis replies are decoded to strings
+
+ # ============================== Middleware settings ==============================
+
+ MIDDLEWARES = [
+     # === Request preprocessing stage ===
+     'crawlo.middleware.request_ignore.RequestIgnoreMiddleware',  # 1. drop invalid requests
+     'crawlo.middleware.download_delay.DownloadDelayMiddleware',  # 2. throttle request rate
+     'crawlo.middleware.default_header.DefaultHeaderMiddleware',  # 3. add default headers
+     'crawlo.middleware.proxy.ProxyMiddleware',  # 4. set proxies
+
+     # === Response processing stage ===
+     'crawlo.middleware.retry.RetryMiddleware',  # 5. retry failed requests
+     'crawlo.middleware.response_code.ResponseCodeMiddleware',  # 6. handle special status codes
+     'crawlo.middleware.response_filter.ResponseFilterMiddleware',  # 7. filter response content
+ ]
+
+ # ============================== Extensions and pipelines ==============================
+
+ # Item pipelines (enabled storage backends)
+ PIPELINES = [
+     'crawlo.pipelines.console_pipeline.ConsolePipeline',  # console output
+     # 'crawlo.pipelines.mysql_pipeline.AsyncmyMySQLPipeline',  # MySQL storage (optional)
+ ]
+
+ # Extension components (monitoring and logging)
+ EXTENSIONS = [
+     'crawlo.extension.log_interval.LogIntervalExtension',  # periodic logging
+     'crawlo.extension.log_stats.LogStats',  # statistics
+     'crawlo.extension.logging_extension.CustomLoggerExtension',  # custom logger
+ ]
+
+ # ============================== Logging and monitoring ==============================
+
+ LOG_LEVEL = 'INFO'  # log level: DEBUG/INFO/WARNING/ERROR
+ STATS_DUMP = True  # whether to dump stats periodically
+ LOG_FILE = f'logs/{PROJECT_NAME}.log'  # log file path
+ LOG_FORMAT = '%(asctime)s - [%(name)s] - %(levelname)s: %(message)s'
+ LOG_ENCODING = 'utf-8'
+
+ # ============================== Proxy settings ==============================
+
+ PROXY_ENABLED = False  # whether to enable proxies
+ PROXY_API_URL = "https://api.proxyprovider.com/get"  # proxy fetch endpoint (replace with a real one)
+
+ # Proxy extraction (supports a field path or a function)
+ PROXY_EXTRACTOR = "proxy"  # e.g. for responses like {"proxy": "http://1.1.1.1:8080"}
+
+ # Proxy refresh control
+ PROXY_REFRESH_INTERVAL = 60  # proxy refresh interval (seconds)
+ PROXY_API_TIMEOUT = 10  # proxy API request timeout
+
+ # ============================== Curl-Cffi specific settings ==============================
+
+ # Browser fingerprint emulation (CurlCffi downloader only)
+ CURL_BROWSER_TYPE = "chrome"  # options: chrome, edge, safari, firefox, or a version like chrome136
+
+ # Custom browser version map (overrides the default behaviour)
+ CURL_BROWSER_VERSION_MAP = {
+     "chrome": "chrome136",
+     "edge": "edge101",
+     "safari": "safari184",
+     "firefox": "firefox135",
+     # example: legacy version testing
+     # "chrome_legacy": "chrome110",
+ }
+
+ # Curl-Cffi tuning
+ CURL_RANDOMIZE_DELAY = False  # whether to randomize delays
+ CURL_RETRY_BACKOFF = True  # whether to use exponential backoff on retries
+
+ # Default request headers (can be overridden per Spider)
+ DEFAULT_REQUEST_HEADERS = {
+     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
+                   '(KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
+ }
+
+ # ============================== Downloader tuning ==============================
+
+ # Downloader health checks
+ DOWNLOADER_HEALTH_CHECK = True  # whether to enable downloader health checks
+ HEALTH_CHECK_INTERVAL = 60  # health check interval (seconds)
+
+ # Request statistics
+ REQUEST_STATS_ENABLED = True  # whether to enable request statistics
+ STATS_RESET_ON_START = False  # whether to reset statistics on startup
+
+ # HttpX downloader-specific settings
+ HTTPX_HTTP2 = True  # whether to enable HTTP/2 support
+ HTTPX_FOLLOW_REDIRECTS = True  # whether to follow redirects automatically
+
+ # AioHttp downloader-specific settings
+ AIOHTTP_AUTO_DECOMPRESS = True  # whether to auto-decompress responses
+ AIOHTTP_FORCE_CLOSE = False  # whether to force-close connections
+
+ # General tuning
+ CONNECTION_TTL_DNS_CACHE = 300  # DNS cache TTL (seconds)
+ CONNECTION_KEEPALIVE_TIMEOUT = 15  # keep-alive timeout (seconds)
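
The new settings template documents three ways to pick a downloader; option 3 (per-spider override) would look roughly like the sketch below. Only the custom_settings = {'DOWNLOADER_TYPE': 'httpx'} hook comes from the template; the spider class, its name, and the import path from crawlo.spider (suggested by crawlo/spider/__init__.py in the file list) are assumptions for illustration.

    from crawlo.spider import Spider  # assumed import path

    class NewsSpider(Spider):
        name = "news"  # hypothetical spider

        # Option 3 from the template: per-spider downloader selection.
        custom_settings = {
            "DOWNLOADER_TYPE": "httpx",  # the HTTP/2-capable downloader
            "DOWNLOAD_DELAY": 2.0,       # hypothetical: slow this spider down
        }

        async def parse(self, response):
            ...  # parsing logic goes here
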