crawlo 1.2.5__py3-none-any.whl → 1.2.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,118 +1,91 @@
  # -*- coding: UTF-8 -*-
  """
- {{project_name}} project configuration (gentle edition)
- =============================
- Gentle crawler project configuration built on the Crawlo framework.
- Suited to low-load crawling that stays friendly to the target site.
+ Gentle-mode configuration template
+ Low-load settings, friendly to the target site
  """
- import os
- from crawlo.config import CrawloConfig

  # ============================== Project basics ==============================
  PROJECT_NAME = '{{project_name}}'

- # ============================== Gentle-mode notes ==============================
- #
- # This template is designed for low-load, site-friendly crawling, e.g.:
- # - avoiding excessive pressure on the target site
- # - long-running monitoring crawlers
- # - projects with politeness requirements toward the target site
- #
- # Run-mode characteristics:
- # - RUN_MODE = 'standalone' (single-node mode)
- # - QUEUE_TYPE = 'memory' (in-memory queue)
- # - FILTER_CLASS = 'crawlo.filters.memory_filter.MemoryFilter' (in-memory filter)
- # - DEFAULT_DEDUP_PIPELINE = 'crawlo.pipelines.memory_dedup_pipeline.MemoryDedupPipeline' (in-memory dedup)
- #
- # Configuration highlights:
- # - Low concurrency: CONCURRENCY = 2
- # - High delay: DOWNLOAD_DELAY = 3.0 seconds
- # - Randomization: RANDOMNESS adds jitter to delays
- # - Connection-pool limits: fewer connections to avoid stressing the server
- #
- # Scaling suggestions:
- # - For cross-session dedup, switch FILTER_CLASS and DEFAULT_DEDUP_PIPELINE to their Redis implementations
- # - Tune DOWNLOAD_DELAY and RANDOM_RANGE to the target site's characteristics
- #
- # 🎯 Recommended usage:
- # Prefer the configuration factory to create a gentle-mode configuration:
- # from crawlo.config import CrawloConfig
- # config = CrawloConfig.presets().gentle()
- # process = CrawlerProcess(settings=config.to_dict())
-
- # ============================== Gentle-mode configuration ==============================
- # Create the gentle-mode configuration via the factory
- CONFIG = CrawloConfig.presets().gentle()
-
- # Apply the configuration
- locals().update(CONFIG.to_dict())
-
- # ============================== Network request settings ==============================
- DOWNLOADER = "crawlo.downloader.httpx_downloader.HttpXDownloader"
- DOWNLOAD_TIMEOUT = 60
- VERIFY_SSL = True
-
- # ============================== Low-concurrency settings ==============================
- CONCURRENCY = 2
- MAX_RUNNING_SPIDERS = 1
- DOWNLOAD_DELAY = 3.0
- RANDOMNESS = True
- RANDOM_RANGE = (2.0, 5.0)
-
- # ============================== Connection-pool settings ==============================
- CONNECTION_POOL_LIMIT = 10
- CONNECTION_POOL_LIMIT_PER_HOST = 5
-
- # ============================== Retry settings ==============================
- MAX_RETRY_TIMES = 3
- RETRY_HTTP_CODES = [408, 429, 500, 502, 503, 504, 522, 524]
- IGNORE_HTTP_CODES = [403, 404]
+ # ============================== Gentle run mode ==============================
+ # Run mode: 'standalone' (single node), 'distributed', or 'auto' (auto-detect)
+ RUN_MODE = 'standalone'  # Standalone mode - suited to development and small-scale collection
+
+ # Concurrency settings
+ CONCURRENCY = 2  # Very low concurrency to minimize pressure on the target site
+ DOWNLOAD_DELAY = 3.0  # Longer delay to reduce pressure on the target site
+ RANDOMNESS = True  # Enable random delay
+ RANDOM_RANGE = (0.5, 2.0)  # Random delay range

  # ============================== Queue settings ==============================
- SCHEDULER_MAX_QUEUE_SIZE = 1000
+
+ # Queue type: 'auto' (auto-select), 'memory' (in-memory), 'redis' (distributed)
+ QUEUE_TYPE = 'auto'  # Auto-detect; uses the Redis queue when Redis is available
+ SCHEDULER_MAX_QUEUE_SIZE = 500
+ SCHEDULER_QUEUE_NAME = f'crawlo:{{project_name}}:queue:requests'
  QUEUE_MAX_RETRIES = 3
  QUEUE_TIMEOUT = 300

- # ============================== Data storage settings ==============================
- # MySQL settings
- MYSQL_HOST = os.getenv('MYSQL_HOST', '127.0.0.1')
- MYSQL_PORT = int(os.getenv('MYSQL_PORT', 3306))
- MYSQL_USER = os.getenv('MYSQL_USER', 'root')
- MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD', '123456')
- MYSQL_DB = os.getenv('MYSQL_DB', '{{project_name}}')
- MYSQL_TABLE = '{{project_name}}_data'
-
- # MongoDB settings
- MONGO_URI = os.getenv('MONGO_URI', 'mongodb://localhost:27017')
- MONGO_DATABASE = '{{project_name}}_db'
- MONGO_COLLECTION = '{{project_name}}_items'
-
- # ============================== Dedup settings ==============================
- # Explicitly use the in-memory dedup pipeline in gentle mode
+ # ============================== Dedup and filter settings ==============================
+
+ # Gentle mode uses the in-memory dedup pipeline and filter
  DEFAULT_DEDUP_PIPELINE = 'crawlo.pipelines.memory_dedup_pipeline.MemoryDedupPipeline'
  FILTER_CLASS = 'crawlo.filters.memory_filter.MemoryFilter'
+
+ # --- Redis settings (for distributed dedup and queues) ---
+ REDIS_HOST = '127.0.0.1'
+ REDIS_PORT = 6379
+ REDIS_PASSWORD = ''  # Fill in if a password is set
+
+ # Build the URL depending on whether a password is set
+ if REDIS_PASSWORD:
+     REDIS_URL = f'redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/0'
+ else:
+     REDIS_URL = f'redis://{REDIS_HOST}:{REDIS_PORT}/0'
+
+ # Redis key settings now live in the individual components, under one naming scheme:
+ # crawlo:{project_name}:filter:fingerprint (request dedup)
+ # crawlo:{project_name}:item:fingerprint (item dedup)
+ # crawlo:{project_name}:queue:requests (request queue)
+ # crawlo:{project_name}:queue:processing (in-flight queue)
+ # crawlo:{project_name}:queue:failed (failed queue)
+
  REDIS_TTL = 0
  CLEANUP_FP = 0
  FILTER_DEBUG = True
+ DECODE_RESPONSES = True
+
+ # ============================== Middleware settings ==============================

- # ============================== Middleware and pipelines ==============================
  MIDDLEWARES = [
+     # === Request preprocessing stage ===
      'crawlo.middleware.request_ignore.RequestIgnoreMiddleware',
      'crawlo.middleware.download_delay.DownloadDelayMiddleware',
      'crawlo.middleware.default_header.DefaultHeaderMiddleware',
+     'crawlo.middleware.proxy.ProxyMiddleware',
+     'crawlo.middleware.offsite.OffsiteMiddleware',
+
+     # === Response processing stage ===
      'crawlo.middleware.retry.RetryMiddleware',
      'crawlo.middleware.response_code.ResponseCodeMiddleware',
+     'crawlo.middleware.response_filter.ResponseFilterMiddleware',
  ]

+ # ============================== Data pipeline settings ==============================
+
+ # Data processing pipelines (enabled storage backends)
  PIPELINES = [
      'crawlo.pipelines.console_pipeline.ConsolePipeline',
-     # '{{project_name}}.pipelines.DatabasePipeline',
+     # '{{project_name}}.pipelines.DatabasePipeline',  # Custom database pipeline
+     # 'crawlo.pipelines.mysql_pipeline.AsyncmyMySQLPipeline',  # MySQL storage
+     # 'crawlo.pipelines.mongo_pipeline.MongoPipeline',  # MongoDB storage
  ]

- # Explicitly prepend the in-memory dedup pipeline to the pipeline list
+ # Explicitly prepend the default dedup pipeline to the pipeline list
  PIPELINES.insert(0, DEFAULT_DEDUP_PIPELINE)

  # ============================== Extensions ==============================
+
  EXTENSIONS = [
      'crawlo.extension.log_interval.LogIntervalExtension',
      'crawlo.extension.log_stats.LogStats',
@@ -120,9 +93,9 @@ EXTENSIONS = [
  ]

  # ============================== Logging settings ==============================
+
  LOG_LEVEL = 'INFO'
- LOG_FILE = f'logs/{{project_name}}.log'
  STATS_DUMP = True
-
- # ============================== Custom settings ==============================
- # Add project-specific settings here
+ LOG_FILE = f'logs/{{project_name}}.log'
+ LOG_FORMAT = '%(asctime)s - [%(name)s] - %(levelname)s: %(message)s'
+ LOG_ENCODING = 'utf-8'
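
The factory shortcut that the removed comments documented still conveys how these templates are meant to be consumed; a minimal sketch of that usage, assuming the CrawlerProcess import path (the removed comments show only the CrawloConfig import):

    from crawlo.config import CrawloConfig
    from crawlo.crawler import CrawlerProcess  # import path assumed

    # Build the gentle-mode preset and hand it to the process as a plain dict
    config = CrawloConfig.presets().gentle()
    process = CrawlerProcess(settings=config.to_dict())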
@@ -1,150 +1,135 @@
  # -*- coding: UTF-8 -*-
  """
- {{project_name}} project configuration (high-performance edition)
- =============================
- High-performance crawler project configuration built on the Crawlo framework.
- Optimized for large-scale, high-concurrency scenarios.
+ High-performance mode configuration template
+ Optimized for large scale and high concurrency
  """
- import os
- from crawlo.config import CrawloConfig

  # ============================== Project basics ==============================
  PROJECT_NAME = '{{project_name}}'

- # ============================== High-performance configuration ==============================
- # Create the high-performance configuration via the factory
- CONFIG = CrawloConfig.presets().large_scale(
-     redis_host=os.getenv('REDIS_HOST', '127.0.0.1'),
-     project_name='{{project_name}}'
- )
-
- # Apply the configuration
- locals().update(CONFIG.to_dict())
-
- # ============================== Network request settings ==============================
- DOWNLOADER = "crawlo.downloader.cffi_downloader.CurlCffiDownloader"
- DOWNLOAD_TIMEOUT = 30
- VERIFY_SSL = True
- USE_SESSION = True
-
- # ============================== High-concurrency settings ==============================
- CONCURRENCY = 32
- MAX_RUNNING_SPIDERS = 10
- DOWNLOAD_DELAY = 0.5
- RANDOMNESS = True
- RANDOM_RANGE = (0.8, 1.2)
-
- # ============================== Connection-pool settings ==============================
- CONNECTION_POOL_LIMIT = 100
- CONNECTION_POOL_LIMIT_PER_HOST = 50
+ # ============================== High-performance run mode ==============================
+ # Run mode: 'standalone' (single node), 'distributed', or 'auto' (auto-detect)
+ RUN_MODE = 'standalone'  # Standalone mode - suited to development and small-scale collection

- # ============================== Retry settings ==============================
- MAX_RETRY_TIMES = 5
- RETRY_HTTP_CODES = [408, 429, 500, 502, 503, 504, 522, 524]
- IGNORE_HTTP_CODES = [403, 404]
+ # Concurrency settings
+ CONCURRENCY = 32  # High concurrency to make full use of system resources
+ DOWNLOAD_DELAY = 0.1  # Minimal delay for higher throughput
+ RANDOMNESS = False  # Disable random delay for consistent performance

  # ============================== Queue settings ==============================
- SCHEDULER_MAX_QUEUE_SIZE = 10000
+
+ # Queue type: 'auto' (auto-select), 'memory' (in-memory), 'redis' (distributed)
+ QUEUE_TYPE = 'auto'  # Auto-detect; uses the Redis queue when Redis is available
+ SCHEDULER_MAX_QUEUE_SIZE = 5000
  SCHEDULER_QUEUE_NAME = f'crawlo:{{project_name}}:queue:requests'
- QUEUE_MAX_RETRIES = 5
+ QUEUE_MAX_RETRIES = 3
  QUEUE_TIMEOUT = 300
- LARGE_SCALE_BATCH_SIZE = 2000
- LARGE_SCALE_CHECKPOINT_INTERVAL = 5000

- # ============================== Redis settings ==============================
- REDIS_HOST = os.getenv('REDIS_HOST', '127.0.0.1')
- REDIS_PORT = int(os.getenv('REDIS_PORT', 6379))
- REDIS_PASSWORD = os.getenv('REDIS_PASSWORD', '')
- REDIS_DB = int(os.getenv('REDIS_DB', 0))
+ # ============================== Dedup and filter settings ==============================
+
+ # In high-performance mode, use Redis dedup when Redis is available, in-memory dedup otherwise
+ DEFAULT_DEDUP_PIPELINE = 'crawlo.pipelines.redis_dedup_pipeline.RedisDedupPipeline'
+ FILTER_CLASS = 'crawlo.filters.aioredis_filter.AioRedisFilter'
+
+ # --- Redis settings (for distributed dedup and queues) ---
+ REDIS_HOST = '127.0.0.1'
+ REDIS_PORT = 6379
+ REDIS_PASSWORD = ''  # Fill in if a password is set

  # Build the URL depending on whether a password is set
  if REDIS_PASSWORD:
-     REDIS_URL = f'redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}'
+     REDIS_URL = f'redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/0'
  else:
-     REDIS_URL = f'redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}'
-
- # ============================== Data storage settings ==============================
- # MySQL settings
- MYSQL_HOST = os.getenv('MYSQL_HOST', '127.0.0.1')
- MYSQL_PORT = int(os.getenv('MYSQL_PORT', 3306))
- MYSQL_USER = os.getenv('MYSQL_USER', 'root')
- MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD', '123456')
- MYSQL_DB = os.getenv('MYSQL_DB', '{{project_name}}')
- MYSQL_TABLE = '{{project_name}}_data'
- MYSQL_BATCH_SIZE = 200
- MYSQL_USE_BATCH = True
- MYSQL_POOL_MIN = 10
- MYSQL_POOL_MAX = 50
-
- # MongoDB settings
- MONGO_URI = os.getenv('MONGO_URI', 'mongodb://localhost:27017')
- MONGO_DATABASE = '{{project_name}}_db'
- MONGO_COLLECTION = '{{project_name}}_items'
- MONGO_BATCH_SIZE = 200
- MONGO_USE_BATCH = True
- MONGO_MAX_POOL_SIZE = 300
- MONGO_MIN_POOL_SIZE = 50
-
- # ============================== Dedup settings ==============================
+     REDIS_URL = f'redis://{REDIS_HOST}:{REDIS_PORT}/0'
+
+ # Redis key settings now live in the individual components, under one naming scheme:
+ # crawlo:{project_name}:filter:fingerprint (request dedup)
+ # crawlo:{project_name}:item:fingerprint (item dedup)
+ # crawlo:{project_name}:queue:requests (request queue)
+ # crawlo:{project_name}:queue:processing (in-flight queue)
+ # crawlo:{project_name}:queue:failed (failed queue)
+
  REDIS_TTL = 0
  CLEANUP_FP = 0
- FILTER_DEBUG = False  # Disable debug logging in production
+ FILTER_DEBUG = True
+ DECODE_RESPONSES = True
+
+ # ============================== Middleware settings ==============================

- # ============================== Middleware and pipelines ==============================
  MIDDLEWARES = [
+     # === Request preprocessing stage ===
      'crawlo.middleware.request_ignore.RequestIgnoreMiddleware',
      'crawlo.middleware.download_delay.DownloadDelayMiddleware',
      'crawlo.middleware.default_header.DefaultHeaderMiddleware',
      'crawlo.middleware.proxy.ProxyMiddleware',
+     'crawlo.middleware.offsite.OffsiteMiddleware',
+
+     # === Response processing stage ===
      'crawlo.middleware.retry.RetryMiddleware',
      'crawlo.middleware.response_code.ResponseCodeMiddleware',
      'crawlo.middleware.response_filter.ResponseFilterMiddleware',
  ]

+ # ============================== Data pipeline settings ==============================
+
+ # Data processing pipelines (enabled storage backends)
  PIPELINES = [
      'crawlo.pipelines.console_pipeline.ConsolePipeline',
-     # '{{project_name}}.pipelines.DatabasePipeline',
-     # 'crawlo.pipelines.mysql_pipeline.AsyncmyMySQLPipeline',
-     # 'crawlo.pipelines.mongo_pipeline.MongoPipeline',
+     # '{{project_name}}.pipelines.DatabasePipeline',  # Custom database pipeline
+     # 'crawlo.pipelines.mysql_pipeline.AsyncmyMySQLPipeline',  # MySQL storage
+     # 'crawlo.pipelines.mongo_pipeline.MongoPipeline',  # MongoDB storage
  ]

+ # Explicitly prepend the default dedup pipeline to the pipeline list
+ PIPELINES.insert(0, DEFAULT_DEDUP_PIPELINE)
+
  # ============================== Extensions ==============================
+
  EXTENSIONS = [
      'crawlo.extension.log_interval.LogIntervalExtension',
      'crawlo.extension.log_stats.LogStats',
      'crawlo.extension.logging_extension.CustomLoggerExtension',
-     # 'crawlo.extension.memory_monitor.MemoryMonitorExtension',
-     # 'crawlo.extension.request_recorder.RequestRecorderExtension',
-     # 'crawlo.extension.performance_profiler.PerformanceProfilerExtension',
+     # 'crawlo.extension.memory_monitor.MemoryMonitorExtension',  # Memory monitoring
+     # 'crawlo.extension.request_recorder.RequestRecorderExtension',  # Request recording
+     # 'crawlo.extension.performance_profiler.PerformanceProfilerExtension',  # Performance profiling
+     # 'crawlo.extension.health_check.HealthCheckExtension',  # Health checks
  ]

  # ============================== Logging settings ==============================
+
  LOG_LEVEL = 'INFO'
- LOG_FILE = f'logs/{{project_name}}.log'
  STATS_DUMP = True
+ LOG_FILE = f'logs/{{project_name}}.log'
+ LOG_FORMAT = '%(asctime)s - [%(name)s] - %(levelname)s: %(message)s'
+ LOG_ENCODING = 'utf-8'
+
+ # ============================== Performance tuning ==============================
+
+ # Connection pool settings
+ CONNECTION_POOL_LIMIT = 100
+ DOWNLOAD_MAXSIZE = 10 * 1024 * 1024  # 10 MB
+ DOWNLOAD_WARN_SIZE = 1024 * 1024  # 1 MB
+
+ # Downloader tuning
+ DOWNLOADER_HEALTH_CHECK = True
+ HEALTH_CHECK_INTERVAL = 30
+
+ # Request statistics
+ REQUEST_STATS_ENABLED = True
+ STATS_RESET_ON_START = False

- # ============================== Proxy settings ==============================
- PROXY_ENABLED = False
- PROXY_API_URL = ""
- PROXY_EXTRACTOR = "proxy"
- PROXY_REFRESH_INTERVAL = 30
- PROXY_API_TIMEOUT = 5
-
- # ============================== Browser fingerprint settings ==============================
- CURL_BROWSER_TYPE = "chrome"
- CURL_BROWSER_VERSION_MAP = {
-     "chrome": "chrome136",
-     "edge": "edge101",
-     "safari": "safari184",
-     "firefox": "firefox135",
- }
-
- # ============================== Downloader tuning ==============================
+ # HttpX downloader specific settings
  HTTPX_HTTP2 = True
  HTTPX_FOLLOW_REDIRECTS = True
+
+ # AioHttp downloader specific settings
  AIOHTTP_AUTO_DECOMPRESS = True
+ AIOHTTP_FORCE_CLOSE = False
+
+ # General tuning
  CONNECTION_TTL_DNS_CACHE = 300
  CONNECTION_KEEPALIVE_TIMEOUT = 15

- # ============================== Custom settings ==============================
- # Add project-specific settings here
+ # Performance monitoring
+ ENABLE_PERFORMANCE_MONITORING = True
+ MEMORY_USAGE_WARNING_THRESHOLD = 800  # MB
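
The removed factory call above shows how this high-performance profile used to be generated in code rather than edited by hand; a minimal sketch of that same call, with the project name as a placeholder:

    import os
    from crawlo.config import CrawloConfig

    # large_scale preset, parameterized exactly as in the removed template lines
    config = CrawloConfig.presets().large_scale(
        redis_host=os.getenv('REDIS_HOST', '127.0.0.1'),
        project_name='my_project',  # hypothetical project name
    )
    settings = config.to_dict()  # plain dict, as consumed via locals().update(...)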
@@ -1,103 +1,99 @@
  # -*- coding: UTF-8 -*-
  """
- {{project_name}} project configuration (simplified edition)
- =============================
- Simplified crawler project configuration built on the Crawlo framework.
- Suited to quick starts and small projects.
+ Simplified-mode configuration template
+ Minimal configuration for quick starts and simple projects
  """
- import os
- from crawlo.config import CrawloConfig

  # ============================== Project basics ==============================
  PROJECT_NAME = '{{project_name}}'

- # ============================== Simplified-edition notes ==============================
- #
- # This template is designed for quick starts and small projects, e.g.:
- # - learning and experimentation
- # - small-scale data collection tasks
- # - personal projects or prototyping
- #
- # Run-mode characteristics:
- # - RUN_MODE = 'standalone' (single-node mode)
- # - QUEUE_TYPE = 'memory' (in-memory queue)
- # - FILTER_CLASS = 'crawlo.filters.memory_filter.MemoryFilter' (in-memory filter)
- # - DEFAULT_DEDUP_PIPELINE = 'crawlo.pipelines.memory_dedup_pipeline.MemoryDedupPipeline' (in-memory dedup)
- #
- # Advantages:
- # - Simple and usable out of the box
- # - No extra dependencies (such as Redis)
- # - Low resource footprint, good for personal development environments
- #
- # Scaling suggestions:
- # - For cross-session dedup, switch FILTER_CLASS and DEFAULT_DEDUP_PIPELINE to their Redis implementations
- # - For higher throughput, adjust CONCURRENCY and DOWNLOAD_DELAY
- #
- # 🎯 Recommended usage:
- # Prefer the configuration factory to create a basic configuration:
- # from crawlo.config import CrawloConfig
- # config = CrawloConfig.standalone()
- # process = CrawlerProcess(settings=config.to_dict())
-
- # ============================== Basic configuration ==============================
- # Create the basic configuration via the factory
- CONFIG = CrawloConfig.standalone(
-     concurrency=4,
-     download_delay=1.0
- )
-
- # Apply the configuration
- locals().update(CONFIG.to_dict())
-
- # ============================== Network request settings ==============================
- DOWNLOADER = "crawlo.downloader.httpx_downloader.HttpXDownloader"
- DOWNLOAD_TIMEOUT = 30
- VERIFY_SSL = True
-
- # ============================== Concurrency settings ==============================
- CONCURRENCY = 4
- DOWNLOAD_DELAY = 1.0
-
- # ============================== Data storage settings ==============================
- # MySQL settings
- MYSQL_HOST = os.getenv('MYSQL_HOST', '127.0.0.1')
- MYSQL_PORT = int(os.getenv('MYSQL_PORT', 3306))
- MYSQL_USER = os.getenv('MYSQL_USER', 'root')
- MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD', '123456')
- MYSQL_DB = os.getenv('MYSQL_DB', '{{project_name}}')
- MYSQL_TABLE = '{{project_name}}_data'
-
- # MongoDB settings
- MONGO_URI = os.getenv('MONGO_URI', 'mongodb://localhost:27017')
- MONGO_DATABASE = '{{project_name}}_db'
- MONGO_COLLECTION = '{{project_name}}_items'
-
- # ============================== Dedup settings ==============================
- # Explicitly use the in-memory dedup pipeline in simplified mode
+ # ============================== Simplified run mode ==============================
+ # Run mode: 'standalone' (single node), 'distributed', or 'auto' (auto-detect)
+ RUN_MODE = 'standalone'  # Standalone mode - suited to development and small-scale collection
+
+ # Concurrency settings
+ CONCURRENCY = 4  # Low concurrency to reduce resource usage
+ DOWNLOAD_DELAY = 1.0  # Longer delay to reduce pressure on the target site
+
+ # ============================== Queue settings ==============================
+
+ # Queue type: 'auto' (auto-select), 'memory' (in-memory), 'redis' (distributed)
+ QUEUE_TYPE = 'auto'  # Auto-detect; uses the Redis queue when Redis is available
+ SCHEDULER_MAX_QUEUE_SIZE = 1000
+ SCHEDULER_QUEUE_NAME = f'crawlo:{{project_name}}:queue:requests'
+ QUEUE_MAX_RETRIES = 3
+ QUEUE_TIMEOUT = 300
+
+ # ============================== Dedup and filter settings ==============================
+
+ # Simplified mode uses the in-memory dedup pipeline and filter
  DEFAULT_DEDUP_PIPELINE = 'crawlo.pipelines.memory_dedup_pipeline.MemoryDedupPipeline'
  FILTER_CLASS = 'crawlo.filters.memory_filter.MemoryFilter'

- # ============================== Middleware and pipelines ==============================
+ # --- Redis settings (for distributed dedup and queues) ---
+ REDIS_HOST = '127.0.0.1'
+ REDIS_PORT = 6379
+ REDIS_PASSWORD = ''  # Fill in if a password is set
+
+ # Build the URL depending on whether a password is set
+ if REDIS_PASSWORD:
+     REDIS_URL = f'redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/0'
+ else:
+     REDIS_URL = f'redis://{REDIS_HOST}:{REDIS_PORT}/0'
+
+ # Redis key settings now live in the individual components, under one naming scheme:
+ # crawlo:{project_name}:filter:fingerprint (request dedup)
+ # crawlo:{project_name}:item:fingerprint (item dedup)
+ # crawlo:{project_name}:queue:requests (request queue)
+ # crawlo:{project_name}:queue:processing (in-flight queue)
+ # crawlo:{project_name}:queue:failed (failed queue)
+
+ REDIS_TTL = 0
+ CLEANUP_FP = 0
+ FILTER_DEBUG = True
+ DECODE_RESPONSES = True
+
+ # ============================== Middleware settings ==============================
+
  MIDDLEWARES = [
+     # === Request preprocessing stage ===
      'crawlo.middleware.request_ignore.RequestIgnoreMiddleware',
      'crawlo.middleware.download_delay.DownloadDelayMiddleware',
      'crawlo.middleware.default_header.DefaultHeaderMiddleware',
+     'crawlo.middleware.proxy.ProxyMiddleware',
+     'crawlo.middleware.offsite.OffsiteMiddleware',
+
+     # === Response processing stage ===
      'crawlo.middleware.retry.RetryMiddleware',
      'crawlo.middleware.response_code.ResponseCodeMiddleware',
+     'crawlo.middleware.response_filter.ResponseFilterMiddleware',
  ]

+ # ============================== Data pipeline settings ==============================
+
+ # Data processing pipelines (enabled storage backends)
  PIPELINES = [
      'crawlo.pipelines.console_pipeline.ConsolePipeline',
-     # '{{project_name}}.pipelines.DatabasePipeline',
+     # '{{project_name}}.pipelines.DatabasePipeline',  # Custom database pipeline
+     # 'crawlo.pipelines.mysql_pipeline.AsyncmyMySQLPipeline',  # MySQL storage
+     # 'crawlo.pipelines.mongo_pipeline.MongoPipeline',  # MongoDB storage
  ]

- # Explicitly prepend the in-memory dedup pipeline to the pipeline list
+ # Explicitly prepend the default dedup pipeline to the pipeline list
  PIPELINES.insert(0, DEFAULT_DEDUP_PIPELINE)

+ # ============================== Extensions ==============================
+
+ EXTENSIONS = [
+     'crawlo.extension.log_interval.LogIntervalExtension',
+     'crawlo.extension.log_stats.LogStats',
+     'crawlo.extension.logging_extension.CustomLoggerExtension',
+ ]
+
  # ============================== Logging settings ==============================
+
  LOG_LEVEL = 'INFO'
- LOG_FILE = f'logs/{{project_name}}.log'
  STATS_DUMP = True
-
- # ============================== Custom settings ==============================
- # Add project-specific settings here
+ LOG_FILE = f'logs/{{project_name}}.log'
+ LOG_FORMAT = '%(asctime)s - [%(name)s] - %(levelname)s: %(message)s'
+ LOG_ENCODING = 'utf-8'
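
All three templates now inline the same password-aware URL assembly; a standalone check of both branches (host, port, and password values are placeholders):

    REDIS_HOST, REDIS_PORT = '127.0.0.1', 6379
    for REDIS_PASSWORD in ('', 's3cret'):
        # Same branch logic as in the templates above
        if REDIS_PASSWORD:
            url = f'redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/0'
        else:
            url = f'redis://{REDIS_HOST}:{REDIS_PORT}/0'
        print(url)
    # redis://127.0.0.1:6379/0
    # redis://:s3cret@127.0.0.1:6379/0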
@@ -26,9 +26,7 @@ def main():

      # Create the crawler process (default settings load automatically)
      try:
-         # Make sure the spider modules are imported correctly
-         spider_modules = ['{{project_name}}.spiders']
-         process = CrawlerProcess(spider_modules=spider_modules)
+         process = CrawlerProcess()
          print("✅ Crawler process initialized successfully")

          # Run the configured spider
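
With the explicit spider_modules list gone, the template relies on the no-argument constructor; a minimal sketch of the slimmed-down entry point, assuming spiders are discovered automatically once the default settings load (the CrawlerProcess import path is assumed, and the run call is elided because the template is truncated before it):

    from crawlo.crawler import CrawlerProcess  # import path assumed

    def main():
        # Default settings load automatically; no spider_modules argument needed
        process = CrawlerProcess()
        print("✅ Crawler process initialized successfully")

    if __name__ == '__main__':
        main()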
@@ -37,6 +37,7 @@ class OptimizedRedisConnectionPool:
          # Connection pool instances
          self._connection_pool: Optional[aioredis.ConnectionPool] = None
          self._redis_client: Optional[aioredis.Redis] = None
+         self._connection_tested = False  # Tracks whether the connection has been tested

          # Connection pool statistics
          self._stats = {
@@ -61,7 +62,8 @@ class OptimizedRedisConnectionPool:
                  connection_pool=self._connection_pool
              )

-             self.logger.info(f"✅ Redis connection pool initialized: {self.redis_url}")
+             # Only log detailed pool information at debug level
+             self.logger.debug(f"✅ Redis connection pool initialized: {self.redis_url}")
              self.logger.debug(f"   Pool configuration: {self.config}")

          except Exception as e:
@@ -71,6 +73,18 @@ class OptimizedRedisConnectionPool:
                  raise_error=True
              )

+     async def _test_connection(self):
+         """Test the Redis connection"""
+         if self._redis_client and not self._connection_tested:
+             try:
+                 await self._redis_client.ping()
+                 self._connection_tested = True
+                 # Only log a successful connection test at debug level
+                 self.logger.debug(f"✅ Redis connection test succeeded: {self.redis_url}")
+             except Exception as e:
+                 self.logger.error(f"❌ Redis connection test failed: {self.redis_url} - {e}")
+                 raise
+
      async def get_connection(self) -> aioredis.Redis:
          """
          Get a Redis connection instance
@@ -79,7 +93,10 @@ class OptimizedRedisConnectionPool:
              A Redis connection instance
          """
          if not self._redis_client:
-             await self._initialize_pool()
+             self._initialize_pool()
+
+         # Make sure the connection is valid
+         await self._test_connection()

          self._stats['active_connections'] += 1
          return self._redis_client
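
The lazy ping in get_connection generalizes to any async resource: construct the client without I/O, validate it with a single round-trip on first use, and remember the result. A self-contained sketch of the same pattern against redis.asyncio directly (class and method names here are illustrative, not Crawlo's API); note that an async pool initializer must itself be awaited:

    import asyncio
    from typing import Optional

    import redis.asyncio as aioredis

    class LazyRedis:
        def __init__(self, url: str = 'redis://127.0.0.1:6379/0'):
            self._url = url
            self._client: Optional[aioredis.Redis] = None
            self._tested = False  # ping once, then trust the pool

        async def get(self) -> aioredis.Redis:
            if self._client is None:
                # from_url only builds the connection pool; no network I/O yet
                self._client = aioredis.from_url(self._url, decode_responses=True)
            if not self._tested:
                await self._client.ping()  # first real round-trip validates the pool
                self._tested = True
            return self._client

    async def main():
        client = await LazyRedis().get()
        await client.set('k', 'v')
        print(await client.get('k'))

    asyncio.run(main())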
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: crawlo
- Version: 1.2.5
+ Version: 1.2.6
  Summary: Crawlo is a high-performance Python crawler framework built on asynchronous IO, with support for distributed crawling.
  Home-page: https://github.com/crawl-coder/Crawlo.git
  Author: crawl-coder