crawlo-1.1.1-py3-none-any.whl → crawlo-1.1.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (128)
  1. crawlo/__init__.py +34 -33
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +40 -40
  4. crawlo/commands/__init__.py +13 -13
  5. crawlo/commands/check.py +594 -594
  6. crawlo/commands/genspider.py +152 -126
  7. crawlo/commands/list.py +156 -147
  8. crawlo/commands/run.py +285 -285
  9. crawlo/commands/startproject.py +196 -111
  10. crawlo/commands/stats.py +188 -188
  11. crawlo/commands/utils.py +187 -0
  12. crawlo/config.py +280 -0
  13. crawlo/core/__init__.py +2 -2
  14. crawlo/core/engine.py +171 -158
  15. crawlo/core/enhanced_engine.py +190 -0
  16. crawlo/core/processor.py +40 -40
  17. crawlo/core/scheduler.py +166 -57
  18. crawlo/crawler.py +1028 -495
  19. crawlo/downloader/__init__.py +242 -78
  20. crawlo/downloader/aiohttp_downloader.py +212 -199
  21. crawlo/downloader/cffi_downloader.py +251 -241
  22. crawlo/downloader/httpx_downloader.py +259 -246
  23. crawlo/event.py +11 -11
  24. crawlo/exceptions.py +82 -78
  25. crawlo/extension/__init__.py +31 -31
  26. crawlo/extension/log_interval.py +49 -49
  27. crawlo/extension/log_stats.py +44 -44
  28. crawlo/extension/logging_extension.py +34 -34
  29. crawlo/filters/__init__.py +154 -37
  30. crawlo/filters/aioredis_filter.py +242 -150
  31. crawlo/filters/memory_filter.py +269 -202
  32. crawlo/items/__init__.py +23 -23
  33. crawlo/items/base.py +21 -21
  34. crawlo/items/fields.py +53 -53
  35. crawlo/items/items.py +104 -104
  36. crawlo/middleware/__init__.py +21 -21
  37. crawlo/middleware/default_header.py +32 -32
  38. crawlo/middleware/download_delay.py +28 -28
  39. crawlo/middleware/middleware_manager.py +135 -135
  40. crawlo/middleware/proxy.py +248 -245
  41. crawlo/middleware/request_ignore.py +30 -30
  42. crawlo/middleware/response_code.py +18 -18
  43. crawlo/middleware/response_filter.py +26 -26
  44. crawlo/middleware/retry.py +125 -90
  45. crawlo/mode_manager.py +201 -0
  46. crawlo/network/__init__.py +21 -7
  47. crawlo/network/request.py +311 -203
  48. crawlo/network/response.py +271 -166
  49. crawlo/pipelines/__init__.py +22 -13
  50. crawlo/pipelines/bloom_dedup_pipeline.py +157 -0
  51. crawlo/pipelines/console_pipeline.py +39 -39
  52. crawlo/pipelines/csv_pipeline.py +317 -0
  53. crawlo/pipelines/database_dedup_pipeline.py +225 -0
  54. crawlo/pipelines/json_pipeline.py +219 -0
  55. crawlo/pipelines/memory_dedup_pipeline.py +116 -0
  56. crawlo/pipelines/mongo_pipeline.py +116 -116
  57. crawlo/pipelines/mysql_pipeline.py +195 -195
  58. crawlo/pipelines/pipeline_manager.py +56 -56
  59. crawlo/pipelines/redis_dedup_pipeline.py +163 -0
  60. crawlo/project.py +153 -153
  61. crawlo/queue/__init__.py +0 -0
  62. crawlo/queue/pqueue.py +37 -0
  63. crawlo/queue/queue_manager.py +308 -0
  64. crawlo/queue/redis_priority_queue.py +209 -0
  65. crawlo/settings/__init__.py +7 -7
  66. crawlo/settings/default_settings.py +245 -167
  67. crawlo/settings/setting_manager.py +99 -99
  68. crawlo/spider/__init__.py +639 -129
  69. crawlo/stats_collector.py +59 -59
  70. crawlo/subscriber.py +106 -106
  71. crawlo/task_manager.py +30 -27
  72. crawlo/templates/crawlo.cfg.tmpl +10 -10
  73. crawlo/templates/project/__init__.py.tmpl +3 -3
  74. crawlo/templates/project/items.py.tmpl +17 -17
  75. crawlo/templates/project/middlewares.py.tmpl +87 -76
  76. crawlo/templates/project/pipelines.py.tmpl +342 -64
  77. crawlo/templates/project/run.py.tmpl +252 -0
  78. crawlo/templates/project/settings.py.tmpl +251 -54
  79. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  80. crawlo/templates/spider/spider.py.tmpl +178 -32
  81. crawlo/utils/__init__.py +7 -7
  82. crawlo/utils/controlled_spider_mixin.py +440 -0
  83. crawlo/utils/date_tools.py +233 -233
  84. crawlo/utils/db_helper.py +343 -343
  85. crawlo/utils/func_tools.py +82 -82
  86. crawlo/utils/large_scale_config.py +287 -0
  87. crawlo/utils/large_scale_helper.py +344 -0
  88. crawlo/utils/log.py +128 -128
  89. crawlo/utils/queue_helper.py +176 -0
  90. crawlo/utils/request.py +267 -267
  91. crawlo/utils/request_serializer.py +220 -0
  92. crawlo/utils/spider_loader.py +62 -62
  93. crawlo/utils/system.py +11 -11
  94. crawlo/utils/tools.py +4 -4
  95. crawlo/utils/url.py +39 -39
  96. crawlo-1.1.3.dist-info/METADATA +635 -0
  97. crawlo-1.1.3.dist-info/RECORD +113 -0
  98. examples/__init__.py +7 -7
  99. examples/controlled_spider_example.py +205 -0
  100. tests/__init__.py +7 -7
  101. tests/test_final_validation.py +154 -0
  102. tests/test_proxy_health_check.py +32 -32
  103. tests/test_proxy_middleware_integration.py +136 -136
  104. tests/test_proxy_providers.py +56 -56
  105. tests/test_proxy_stats.py +19 -19
  106. tests/test_proxy_strategies.py +59 -59
  107. tests/test_redis_config.py +29 -0
  108. tests/test_redis_queue.py +225 -0
  109. tests/test_request_serialization.py +71 -0
  110. tests/test_scheduler.py +242 -0
  111. crawlo/pipelines/mysql_batch_pipline.py +0 -273
  112. crawlo/utils/pqueue.py +0 -174
  113. crawlo-1.1.1.dist-info/METADATA +0 -220
  114. crawlo-1.1.1.dist-info/RECORD +0 -100
  115. examples/baidu_spider/__init__.py +0 -7
  116. examples/baidu_spider/demo.py +0 -94
  117. examples/baidu_spider/items.py +0 -46
  118. examples/baidu_spider/middleware.py +0 -49
  119. examples/baidu_spider/pipeline.py +0 -55
  120. examples/baidu_spider/run.py +0 -27
  121. examples/baidu_spider/settings.py +0 -121
  122. examples/baidu_spider/spiders/__init__.py +0 -7
  123. examples/baidu_spider/spiders/bai_du.py +0 -61
  124. examples/baidu_spider/spiders/miit.py +0 -159
  125. examples/baidu_spider/spiders/sina.py +0 -79
  126. {crawlo-1.1.1.dist-info → crawlo-1.1.3.dist-info}/WHEEL +0 -0
  127. {crawlo-1.1.1.dist-info → crawlo-1.1.3.dist-info}/entry_points.txt +0 -0
  128. {crawlo-1.1.1.dist-info → crawlo-1.1.3.dist-info}/top_level.txt +0 -0
crawlo/pipelines/mysql_batch_pipline.py DELETED
@@ -1,273 +0,0 @@
- # -*- coding: utf-8 -*-
- import asyncio
- import aiomysql
- from typing import Optional, List, Dict
- from asyncmy import create_pool
- from crawlo.utils.log import get_logger
- from crawlo.exceptions import ItemDiscard
- from crawlo.utils.tools import make_insert_sql, logger
-
-
- class AsyncmyMySQLPipeline:
-     def __init__(self, crawler):
-         self.crawler = crawler
-         self.settings = crawler.settings
-         self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))
-
-         # Configuration
-         self.table_name = (
-             self.settings.get('MYSQL_TABLE') or
-             getattr(crawler.spider, 'mysql_table', None) or
-             f"{crawler.spider.name}_items"
-         )
-         self.batch_size = self.settings.getint('MYSQL_BATCH_SIZE', 100)
-         self.flush_interval = self.settings.getfloat('MYSQL_FLUSH_INTERVAL', 3.0)  # seconds
-
-         # Connection pool
-         self._pool_lock = asyncio.Lock()
-         self._pool_initialized = False
-         self.pool = None
-
-         # Buffer and lock
-         self.items_buffer: List[Dict] = []
-         self.buffer_lock = asyncio.Lock()
-
-         # Background task
-         self.flush_task: Optional[asyncio.Task] = None
-
-         # Register the shutdown event
-         crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')
-
-     @classmethod
-     def from_crawler(cls, crawler):
-         return cls(crawler)
-
-     async def _ensure_pool(self):
-         """Ensure the connection pool is initialized (thread-safe)."""
-         if self._pool_initialized:
-             return
-
-         async with self._pool_lock:
-             if not self._pool_initialized:
-                 try:
-                     self.pool = await create_pool(
-                         host=self.settings.get('MYSQL_HOST', 'localhost'),
-                         port=self.settings.get_int('MYSQL_PORT', 3306),
-                         user=self.settings.get('MYSQL_USER', 'root'),
-                         password=self.settings.get('MYSQL_PASSWORD', ''),
-                         db=self.settings.get('MYSQL_DB', 'scrapy_db'),
-                         minsize=self.settings.get_int('MYSQL_POOL_MIN', 3),
-                         maxsize=self.settings.get_int('MYSQL_POOL_MAX', 10),
-                         echo=self.settings.get_bool('MYSQL_ECHO', False)
-                     )
-                     self._pool_initialized = True
-                     self.logger.debug(f"MySQL connection pool initialized (table: {self.table_name})")
-                 except Exception as e:
-                     self.logger.error(f"MySQL connection pool initialization failed: {e}")
-                     raise
-
-     async def open_spider(self, spider):
-         """Start the background flush task when the spider opens."""
-         await self._ensure_pool()
-         self.flush_task = asyncio.create_task(self._flush_loop())
-
-     async def _flush_loop(self):
-         """Background loop: periodically check whether the buffer needs flushing."""
-         while True:
-             await asyncio.sleep(self.flush_interval)
-             if len(self.items_buffer) > 0:
-                 await self._flush_buffer()
-
-     async def _flush_buffer(self):
-         """Write the buffered items to the database in one batch."""
-         async with self.buffer_lock:
-             if not self.items_buffer:
-                 return
-
-             items_to_insert = self.items_buffer.copy()
-             self.items_buffer.clear()
-
-         try:
-             await self._ensure_pool()
-             first_item = items_to_insert[0]
-             sql = make_insert_sql(table=self.table_name, data=first_item, many=True)
-
-             values = [list(item.values()) for item in items_to_insert]
-
-             async with self.pool.acquire() as conn:
-                 async with conn.cursor() as cursor:
-                     affected_rows = await cursor.executemany(sql, values)
-                     await conn.commit()
-
-             spider_name = getattr(self.crawler.spider, 'name', 'unknown')
-             self.logger.info(f"Batch inserted {affected_rows} records into {self.table_name}")
-             self.crawler.stats.inc_value('mysql/insert_success_batch', len(items_to_insert))
-
-         except Exception as e:
-             self.logger.error(f"Batch insert failed: {e}")
-             self.crawler.stats.inc_value('mysql/insert_failed_batch', len(items_to_insert))
-             # Optional: retry or discard
-             raise ItemDiscard(f"Batch insert failed: {e}")
-
-     async def process_item(self, item, spider, kwargs=None) -> dict:
-         """Add the item to the buffer and trigger a batch insert when needed."""
-         item_dict = dict(item)
-
-         async with self.buffer_lock:
-             self.items_buffer.append(item_dict)
-             if len(self.items_buffer) >= self.batch_size:
-                 # Batch threshold reached, flush immediately
-                 await self._flush_buffer()
-
-         return item
-
-     async def spider_closed(self):
-         """Make sure all remaining data is written when the spider closes."""
-         if self.flush_task:
-             self.flush_task.cancel()
-             try:
-                 await self.flush_task
-             except asyncio.CancelledError:
-                 pass
-
-         # Flush the final batch
-         if self.items_buffer:
-             await self._flush_buffer()
-
-         # Close the connection pool
-         if self.pool:
-             self.pool.close()
-             await self.pool.wait_closed()
-             self.logger.info("MySQL connection pool closed")
-
-
- class AiomysqlMySQLPipeline:
-     def __init__(self, crawler):
-         self.crawler = crawler
-         self.settings = crawler.settings
-         self.logger = get_logger(self.__class__.__name__, self.settings.get('LOG_LEVEL'))
-
-         # Configuration
-         self.table_name = (
-             self.settings.get('MYSQL_TABLE') or
-             getattr(crawler.spider, 'mysql_table', None) or
-             f"{crawler.spider.name}_items"
-         )
-         self.batch_size = self.settings.getint('MYSQL_BATCH_SIZE', 100)
-         self.flush_interval = self.settings.getfloat('MYSQL_FLUSH_INTERVAL', 3.0)
-
-         # Connection pool
-         self._pool_lock = asyncio.Lock()
-         self._pool_initialized = False
-         self.pool = None
-
-         # Buffer
-         self.items_buffer: List[Dict] = []
-         self.buffer_lock = asyncio.Lock()
-
-         # Background task
-         self.flush_task: Optional[asyncio.Task] = None
-
-         crawler.subscriber.subscribe(self.spider_closed, event='spider_closed')
-
-     @classmethod
-     def create_instance(cls, crawler):
-         return cls(crawler)
-
-     async def _init_pool(self):
-         """Lazily initialize the connection pool (thread-safe)."""
-         if self._pool_initialized:
-             return
-
-         async with self._pool_lock:
-             if not self._pool_initialized:
-                 try:
-                     self.pool = await aiomysql.create_pool(
-                         host=self.settings.get('MYSQL_HOST', 'localhost'),
-                         port=self.settings.getint('MYSQL_PORT', 3306),
-                         user=self.settings.get('MYSQL_USER', 'root'),
-                         password=self.settings.get('MYSQL_PASSWORD', ''),
-                         db=self.settings.get('MYSQL_DB', 'scrapy_db'),
-                         minsize=self.settings.getint('MYSQL_POOL_MIN', 3),
-                         maxsize=self.settings.getint('MYSQL_POOL_MAX', 10),
-                         cursorclass=aiomysql.DictCursor,
-                         autocommit=False
-                     )
-                     self._pool_initialized = True
-                     self.logger.debug(f"aiomysql connection pool initialized (table: {self.table_name})")
-                 except Exception as e:
-                     self.logger.error(f"aiomysql connection pool initialization failed: {e}")
-                     raise
-
-     async def open_spider(self, spider):
-         """Create the background flush task when the spider opens."""
-         await self._init_pool()
-         self.flush_task = asyncio.create_task(self._flush_loop())
-
-     async def _flush_loop(self):
-         """Flush the buffer periodically."""
-         while True:
-             await asyncio.sleep(self.flush_interval)
-             if len(self.items_buffer) > 0:
-                 await self._flush_buffer()
-
-     async def _flush_buffer(self):
-         """Perform the batch insert."""
-         async with self.buffer_lock:
-             if not self.items_buffer:
-                 return
-             items_to_insert = self.items_buffer.copy()
-             self.items_buffer.clear()
-
-         try:
-             await self._init_pool()
-             keys = items_to_insert[0].keys()
-             placeholders = ', '.join(['%s'] * len(keys))
-             columns = ', '.join([f'`{k}`' for k in keys])
-             sql = f"INSERT INTO `{self.table_name}` ({columns}) VALUES ({placeholders})"
-
-             values = [list(item.values()) for item in items_to_insert]
-
-             async with self.pool.acquire() as conn:
-                 async with conn.cursor() as cursor:
-                     result = await cursor.executemany(sql, values)
-                     await conn.commit()
-
-             spider_name = getattr(self.crawler.spider, 'name', 'unknown')
-             self.logger.info(f"[{spider_name}] Batch inserted {result} records into {self.table_name}")
-             self.crawler.stats.inc_value('mysql/insert_success_batch', len(items_to_insert))
-
-         except aiomysql.Error as e:
-             self.logger.error(f"aiomysql batch insert failed: {e}")
-             self.crawler.stats.inc_value('mysql/insert_failed_batch', len(items_to_insert))
-             raise ItemDiscard(f"MySQL error: {e.args[1]}")
-         except Exception as e:
-             self.logger.error(f"Unknown error: {e}")
-             raise ItemDiscard(f"Processing failed: {e}")
-
-     async def process_item(self, item, spider) -> dict:
-         item_dict = dict(item)
-
-         async with self.buffer_lock:
-             self.items_buffer.append(item_dict)
-             if len(self.items_buffer) >= self.batch_size:
-                 await self._flush_buffer()
-
-         return item
-
-     async def spider_closed(self):
-         """Clean up resources and commit the remaining data."""
-         if self.flush_task:
-             self.flush_task.cancel()
-             try:
-                 await self.flush_task
-             except asyncio.CancelledError:
-                 pass
-
-         if self.items_buffer:
-             await self._flush_buffer()
-
-         if self.pool:
-             self.pool.close()
-             await self.pool.wait_closed()
-             self.logger.info("aiomysql connection pool released")
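Both pipelines removed above implement the same size-or-time batching strategy: items accumulate in an in-memory buffer guarded by an asyncio.Lock, and the buffer is written out either when it reaches MYSQL_BATCH_SIZE or when the MYSQL_FLUSH_INTERVAL background timer fires. The sketch below distills that pattern into a standalone helper; it uses only plain asyncio, and the `write_batch` callback is a hypothetical stand-in for the real `executemany()` call, so it illustrates the idea rather than crawlo's actual API.

```python
import asyncio
from typing import Any, Awaitable, Callable, List, Optional


class BatchBuffer:
    """Size-or-time batching: flush when the buffer is full or the timer fires."""

    def __init__(self, write_batch: Callable[[List[Any]], Awaitable[None]],
                 batch_size: int = 100, flush_interval: float = 3.0):
        self._write_batch = write_batch          # e.g. a wrapper around executemany()
        self._batch_size = batch_size
        self._flush_interval = flush_interval
        self._buffer: List[Any] = []
        self._lock = asyncio.Lock()
        self._task: Optional[asyncio.Task] = None

    def start(self) -> None:
        # Launch the periodic flush loop.
        self._task = asyncio.create_task(self._flush_loop())

    async def add(self, item: Any) -> None:
        async with self._lock:
            self._buffer.append(item)
            full = len(self._buffer) >= self._batch_size
        if full:                                  # flush outside the lock to avoid re-entry
            await self.flush()

    async def flush(self) -> None:
        async with self._lock:
            if not self._buffer:
                return
            batch, self._buffer = self._buffer, []
        await self._write_batch(batch)            # DB write happens without holding the lock

    async def _flush_loop(self) -> None:
        while True:
            await asyncio.sleep(self._flush_interval)
            await self.flush()

    async def close(self) -> None:
        if self._task:
            self._task.cancel()
            try:
                await self._task
            except asyncio.CancelledError:
                pass
        await self.flush()                        # drain whatever is left
```

Swapping the buffer under the lock and writing outside it keeps the database call from blocking producers and avoids re-acquiring the non-reentrant asyncio.Lock from inside `add()`.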
crawlo/utils/pqueue.py DELETED
@@ -1,174 +0,0 @@
- # -*- coding:UTF-8 -*-
- import sys
- import asyncio
- import warnings
- from urllib.parse import urlparse
- from asyncio import PriorityQueue
- from redis.asyncio import from_url
- from typing import Any, Optional, Dict, Annotated
- from pydantic import (
-     BaseModel,
-     Field,
-     model_validator
- )
-
- from crawlo import Request
- from crawlo.settings.default_settings import REDIS_URL
-
-
- class SpiderPriorityQueue(PriorityQueue):
-     """Async priority queue with timeout support."""
-
-     def __init__(self, maxsize: int = 0) -> None:
-         """Initialize the queue; a maxsize of 0 means no size limit."""
-         super().__init__(maxsize)
-
-     async def get(self, timeout: float = 0.1) -> Optional[Request]:
-         """
-         Get an element from the queue asynchronously, with a timeout.
-
-         Args:
-             timeout: timeout in seconds, 0.1 by default
-
-         Returns:
-             The queue element (priority, value), or None on timeout
-         """
-         try:
-             # Pick the timeout implementation based on the Python version
-             if sys.version_info >= (3, 11):
-                 async with asyncio.timeout(timeout):
-                     return await super().get()
-             else:
-                 return await asyncio.wait_for(super().get(), timeout=timeout)
-         except asyncio.TimeoutError:
-             return None
-
-
- class TaskModel(BaseModel):
-     """Spider task data model (fully compatible with Pydantic V2)."""
-     url: Annotated[str, Field(min_length=1, max_length=2000, examples=["https://example.com"])]
-     meta: Dict[str, Any] = Field(default_factory=dict)
-     priority: Annotated[int, Field(default=0, ge=0, le=10, description="0 = highest priority")]
-
-     @classmethod
-     def validate_url(cls, v: str) -> str:
-         """Validate the URL format."""
-         if not v.startswith(('http://', 'https://')):
-             raise ValueError('URL must start with http:// or https://')
-
-         parsed = urlparse(v)
-         if not parsed.netloc:
-             raise ValueError('URL is missing a valid domain')
-
-         return v.strip()
-
-     @model_validator(mode='after')
-     def validate_priority_logic(self) -> 'TaskModel':
-         """Cross-field validation example."""
-         if 'admin' in self.url and self.priority > 5:
-             self.priority = 5  # Automatically lower the priority of admin pages
-         return self
-
-
- class DistributedPriorityQueue:
-     def __init__(
-         self,
-         redis_url: str,
-         queue_name: str = "spider_queue",
-         max_connections: int = 10,
-         health_check_interval: int = 30
-     ):
-         """
-         Args:
-             redis_url: redis://[:password]@host:port[/db]
-             queue_name: name of the Redis sorted-set key
-             max_connections: connection pool size
-             health_check_interval: connection health check interval (seconds)
-         """
-         self.redis = from_url(
-             redis_url,
-             max_connections=max_connections,
-             health_check_interval=health_check_interval,
-             socket_keepalive=True,
-             decode_responses=True
-         )
-         self.queue_name = queue_name
-
-     async def put(self, task: TaskModel) -> bool:
-         """
-         Add a task to the queue (using Pydantic V2's model_dump_json).
-
-         Args:
-             task: a validated TaskModel instance
-
-         Returns:
-             bool: whether the task was added (Redis ZADD returns the number of elements added)
-         """
-         with warnings.catch_warnings():
-             warnings.simplefilter("ignore", category=DeprecationWarning)
-             task_str = task.model_dump_json()  # The correct V2 serialization method
-             return await self.redis.zadd(
-                 self.queue_name,
-                 {task_str: task.priority}
-             ) > 0
-
-     async def get(self, timeout: float = 1.0) -> Optional[TaskModel]:
-         """
-         Get the highest-priority task (validated automatically).
-
-         Args:
-             timeout: blocking timeout in seconds
-
-         Returns:
-             A TaskModel instance, or None (timeout / empty queue)
-         """
-         try:
-             result = await self.redis.bzpopmax(
-                 self.queue_name,
-                 timeout=timeout
-             )
-             if result:
-                 _, task_str, _ = result
-                 with warnings.catch_warnings():
-                     warnings.simplefilter("ignore", category=DeprecationWarning)
-                     return TaskModel.model_validate_json(task_str)  # The correct V2 deserialization method
-         except Exception as e:
-             print(f"Failed to fetch task: {type(e).__name__}: {e}")
-         return None
-
-     async def aclose(self):
-         """Close the connection safely."""
-         await self.redis.aclose()
-
-     async def __aenter__(self):
-         return self
-
-     async def __aexit__(self, exc_type, exc_val, exc_tb):
-         await self.aclose()
-
-
- # Usage example
- async def demo():
-     async with DistributedPriorityQueue(
-         REDIS_URL,
-         max_connections=20,
-         health_check_interval=10
-     ) as queue:
-         # Add a task (validation is triggered automatically)
-         task = TaskModel(
-             url="https://example.com/1",
-             priority=1,
-             meta={"depth": 2}
-         )
-
-         if await queue.put(task):
-             print(f"Task added: {task.url}")
-
-         # Fetch a task
-         if result := await queue.get(timeout=2.0):
-             print(f"Got task: {result.url} (priority={result.priority})")
-             print(f"Metadata: {result.meta}")
-
-
- if __name__ == "__main__":
-     asyncio.run(demo())
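The deleted SpiderPriorityQueue wraps `PriorityQueue.get()` in a version-dependent timeout (`asyncio.timeout()` on Python 3.11+, `asyncio.wait_for()` otherwise) and returns None when nothing arrives, so a caller can poll an empty queue without blocking forever. A self-contained sketch of that polling pattern, using only the standard library:

```python
import asyncio
import sys
from typing import Any, Optional


async def get_with_timeout(queue: asyncio.PriorityQueue, timeout: float = 0.1) -> Optional[Any]:
    """Return the next queue entry, or None if nothing arrives within `timeout` seconds."""
    try:
        if sys.version_info >= (3, 11):
            async with asyncio.timeout(timeout):   # 3.11+ context-manager timeout
                return await queue.get()
        return await asyncio.wait_for(queue.get(), timeout=timeout)
    except asyncio.TimeoutError:
        return None


async def main() -> None:
    q: asyncio.PriorityQueue = asyncio.PriorityQueue()
    await q.put((1, "https://example.com"))

    print(await get_with_timeout(q))   # (1, 'https://example.com')
    print(await get_with_timeout(q))   # None -- queue is empty, the call times out


if __name__ == "__main__":
    asyncio.run(main())
```

Returning None on timeout is what lets a scheduler loop interleave queue polling with shutdown and idle checks instead of blocking indefinitely.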
crawlo-1.1.1.dist-info/METADATA DELETED
@@ -1,220 +0,0 @@
- Metadata-Version: 2.4
- Name: crawlo
- Version: 1.1.1
- Summary: Crawlo is a high-performance Python crawler framework built on asynchronous IO, with support for distributed crawling.
- Home-page: https://github.com/crawl-coder/Crawlo.git
- Author: crawl-coder
- Author-email: crawlo@qq.com
- License: MIT
- Classifier: Programming Language :: Python :: 3
- Classifier: License :: OSI Approved :: MIT License
- Classifier: Operating System :: OS Independent
- Requires-Python: >=3.6
- Description-Content-Type: text/markdown
- Requires-Dist: aiohttp>=3.12.14
- Requires-Dist: aiomysql>=0.2.0
- Requires-Dist: aioredis>=2.0.1
- Requires-Dist: asyncmy>=0.2.10
- Requires-Dist: cssselect>=1.2.0
- Requires-Dist: dateparser>=1.2.2
- Requires-Dist: httpx[http2]>=0.27.0
- Requires-Dist: curl-cffi>=0.13.0
- Requires-Dist: lxml>=5.2.1
- Requires-Dist: motor>=3.7.0
- Requires-Dist: parsel>=1.9.1
- Requires-Dist: pydantic>=2.11.7
- Requires-Dist: pymongo>=4.11
- Requires-Dist: PyMySQL>=1.1.1
- Requires-Dist: python-dateutil>=2.9.0.post0
- Requires-Dist: redis>=6.2.0
- Requires-Dist: requests>=2.32.4
- Requires-Dist: six>=1.17.0
- Requires-Dist: ujson>=5.9.0
- Requires-Dist: urllib3>=2.5.0
- Requires-Dist: w3lib>=2.1.2
- Requires-Dist: rich>=14.1.0
- Requires-Dist: astor>=0.8.1
- Requires-Dist: watchdog>=6.0.0
- Provides-Extra: render
- Requires-Dist: webdriver-manager>=4.0.0; extra == "render"
- Requires-Dist: playwright; extra == "render"
- Requires-Dist: selenium>=3.141.0; extra == "render"
- Provides-Extra: all
- Requires-Dist: bitarray>=1.5.3; extra == "all"
- Requires-Dist: PyExecJS>=1.5.1; extra == "all"
- Requires-Dist: pymongo>=3.10.1; extra == "all"
- Requires-Dist: redis-py-cluster>=2.1.0; extra == "all"
- Requires-Dist: webdriver-manager>=4.0.0; extra == "all"
- Requires-Dist: playwright; extra == "all"
- Requires-Dist: selenium>=3.141.0; extra == "all"
-
- # 🕷️ Crawlo - Lightweight Async Crawler Framework
-
- > A concise, easy-to-use, and extensible asynchronous Python crawler framework, inspired by Scrapy but lighter and easier to get started with.
-
- 🚀 Supports command-line tooling, spider generation, compliance checks, run monitoring, and statistics analysis; well suited for quickly building small and medium-sized crawler projects.
-
- ---
-
- ## 📦 Features
-
- ✅ **Command-line driven**: `crawlo startproject`, `crawlo genspider`, etc.
- ✅ **Automatic spider discovery**: no manual registration; the `spiders/` module is loaded automatically
- ✅ **Async core**: high-concurrency crawling built on `asyncio`
- ✅ **Flexible configuration**: projects are managed via `crawlo.cfg` and `settings.py`
- ✅ **Spider checks**: `crawlo check` validates that spider definitions are compliant
- ✅ **Run statistics**: `crawlo stats` shows historical run metrics (persisted to disk)
- ✅ **Batch runs**: `crawlo run all` starts all spiders
- ✅ **Logging and debugging**: structured log output for easier troubleshooting
-
- ---
-
- ## 🚀 Quick Start
-
- ### 1. Install Crawlo
-
- ```bash
- pip install crawlo
- ```
-
- > ⚠️ Crawlo is still under development; installing from source is recommended:
- >
- > ```bash
- > git clone https://github.com/yourname/crawlo.git
- > pip install -e crawlo
- > ```
-
- ### 2. Create a project
-
- ```bash
- crawlo startproject myproject
- cd myproject
- ```
-
- The generated project structure:
-
- ```
- myproject/
- ├── crawlo.cfg
- ├── myproject/
- │   ├── __init__.py
- │   ├── settings.py
- │   └── spiders/
- │       ├── __init__.py
- │       └── (your spiders go here)
- ```
-
- ### 3. Generate a spider
-
- ```bash
- crawlo genspider example example.com
- ```
-
- This generates `spiders/example.py`:
-
- ```python
- class ExampleSpider(Spider):
-     name = "example"
-     start_urls = ["https://example.com"]
-
-     def parse(self, response):
-         # Parsing logic
-         pass
- ```
-
- ### 4. Check spider compliance
-
- ```bash
- crawlo check
- ```
-
- Example output:
-
- ```
- 🔍 Checking 1 spider(s)...
- ✅ example ExampleSpider (OK)
- 🎉 All spiders are compliant!
- ```
-
- ### 5. Run spiders
-
- ```bash
- # Run a single spider
- crawlo run example
-
- # Run all spiders
- crawlo run all
- ```
-
- ### 6. View run statistics
-
- ```bash
- crawlo stats
- ```
-
- Shows requests, responses, item counts, and other metrics from the most recent run:
-
- ```
- 📊 Recent Spider Statistics (last run):
- 🕷️ example
-     downloader/request_count 1
-     item_scraped_count 1
-     log_count/INFO 7
- ```
-
- ---
-
- ## 🛠️ Command List
-
- | Command | Description |
- |------|------|
- | `crawlo startproject <name>` | Create a new project |
- | `crawlo genspider <name> <domain>` | Generate a spider template |
- | `crawlo list` | List all registered spiders |
- | `crawlo check` | Check whether spider definitions are compliant |
- | `crawlo run <spider_name>` | Run the specified spider |
- | `crawlo run all` | Run all spiders |
- | `crawlo stats` | Show statistics from the most recent run |
- | `crawlo stats <spider_name>` | Show statistics for the specified spider |
-
- ---
-
- ## 📁 Project Structure
-
- ```ini
- # crawlo.cfg
- [settings]
- default = myproject.settings
- ```
-
- ```python
- # settings.py
- BOT_NAME = "myproject"
- LOG_LEVEL = "DEBUG"
- CONCURRENT_REQUESTS = 3
- DOWNLOAD_DELAY = 1.0
- # Other settings...
- ```
-
- ---
-
- ## 📊 Statistics Persistence
-
- After each spider run finishes, its statistics are automatically saved to:
-
- ```
- logs/stats/<spider_name>_YYYYMMDD_HHMMSS.json
- ```
-
- They can be read with the `crawlo stats` command, including from another process.
-
- ---
-
- ## 🧪 Developer Tips
-
- - Make sure your spider classes are imported in `spiders/__init__.py`; otherwise they cannot be discovered
- - Use `get_project_root()` to locate the project root automatically (it looks for `crawlo.cfg`)
- - All command-line tools can also be run directly, e.g. `python -m crawlo.commands.list`
-
- ---
-
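The developer tips in the old README mention `get_project_root()`, which locates the project root by looking for `crawlo.cfg`; its implementation is not part of this diff. A plausible stdlib-only sketch of that kind of lookup (the `find_project_root` helper below is hypothetical, not crawlo's actual code):

```python
from pathlib import Path
from typing import Optional


def find_project_root(start: Optional[Path] = None) -> Optional[Path]:
    """Walk upwards from `start` and return the first directory containing crawlo.cfg."""
    current = (start or Path.cwd()).resolve()
    for directory in (current, *current.parents):
        if (directory / "crawlo.cfg").is_file():
            return directory
    return None


if __name__ == "__main__":
    root = find_project_root()
    print(root if root else "No crawlo.cfg found above the current directory")
```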