crawlo-1.1.0-py3-none-any.whl → crawlo-1.1.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of crawlo might be problematic.
- crawlo/__init__.py +33 -24
- crawlo/__version__.py +1 -1
- crawlo/cli.py +40 -40
- crawlo/commands/__init__.py +13 -13
- crawlo/commands/check.py +594 -155
- crawlo/commands/genspider.py +125 -110
- crawlo/commands/list.py +147 -119
- crawlo/commands/run.py +285 -170
- crawlo/commands/startproject.py +111 -101
- crawlo/commands/stats.py +188 -167
- crawlo/core/__init__.py +2 -2
- crawlo/core/engine.py +158 -158
- crawlo/core/processor.py +40 -40
- crawlo/core/scheduler.py +57 -57
- crawlo/crawler.py +494 -492
- crawlo/downloader/__init__.py +78 -78
- crawlo/downloader/aiohttp_downloader.py +199 -199
- crawlo/downloader/cffi_downloader.py +242 -277
- crawlo/downloader/httpx_downloader.py +246 -246
- crawlo/event.py +11 -11
- crawlo/exceptions.py +78 -78
- crawlo/extension/__init__.py +31 -31
- crawlo/extension/log_interval.py +49 -49
- crawlo/extension/log_stats.py +44 -44
- crawlo/extension/logging_extension.py +34 -34
- crawlo/filters/__init__.py +37 -37
- crawlo/filters/aioredis_filter.py +150 -150
- crawlo/filters/memory_filter.py +202 -202
- crawlo/items/__init__.py +23 -23
- crawlo/items/base.py +21 -21
- crawlo/items/fields.py +53 -53
- crawlo/items/items.py +104 -104
- crawlo/middleware/__init__.py +21 -21
- crawlo/middleware/default_header.py +32 -32
- crawlo/middleware/download_delay.py +28 -28
- crawlo/middleware/middleware_manager.py +135 -135
- crawlo/middleware/proxy.py +245 -245
- crawlo/middleware/request_ignore.py +30 -30
- crawlo/middleware/response_code.py +18 -18
- crawlo/middleware/response_filter.py +26 -26
- crawlo/middleware/retry.py +90 -90
- crawlo/network/__init__.py +7 -7
- crawlo/network/request.py +203 -203
- crawlo/network/response.py +166 -166
- crawlo/pipelines/__init__.py +13 -13
- crawlo/pipelines/console_pipeline.py +39 -39
- crawlo/pipelines/mongo_pipeline.py +116 -116
- crawlo/pipelines/mysql_batch_pipline.py +272 -272
- crawlo/pipelines/mysql_pipeline.py +195 -195
- crawlo/pipelines/pipeline_manager.py +56 -56
- crawlo/project.py +153 -0
- crawlo/settings/__init__.py +7 -7
- crawlo/settings/default_settings.py +166 -168
- crawlo/settings/setting_manager.py +99 -99
- crawlo/spider/__init__.py +129 -129
- crawlo/stats_collector.py +59 -59
- crawlo/subscriber.py +106 -106
- crawlo/task_manager.py +27 -27
- crawlo/templates/crawlo.cfg.tmpl +10 -10
- crawlo/templates/project/__init__.py.tmpl +3 -3
- crawlo/templates/project/items.py.tmpl +17 -17
- crawlo/templates/project/middlewares.py.tmpl +75 -75
- crawlo/templates/project/pipelines.py.tmpl +63 -63
- crawlo/templates/project/settings.py.tmpl +54 -54
- crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
- crawlo/templates/spider/spider.py.tmpl +31 -31
- crawlo/utils/__init__.py +7 -7
- crawlo/utils/date_tools.py +233 -233
- crawlo/utils/db_helper.py +343 -343
- crawlo/utils/func_tools.py +82 -82
- crawlo/utils/log.py +128 -128
- crawlo/utils/pqueue.py +173 -173
- crawlo/utils/request.py +267 -267
- crawlo/utils/spider_loader.py +62 -62
- crawlo/utils/system.py +11 -11
- crawlo/utils/tools.py +4 -4
- crawlo/utils/url.py +39 -39
- crawlo-1.1.1.dist-info/METADATA +220 -0
- crawlo-1.1.1.dist-info/RECORD +100 -0
- examples/__init__.py +7 -0
- examples/baidu_spider/__init__.py +7 -0
- examples/baidu_spider/demo.py +94 -0
- examples/baidu_spider/items.py +46 -0
- examples/baidu_spider/middleware.py +49 -0
- examples/baidu_spider/pipeline.py +55 -0
- examples/baidu_spider/run.py +27 -0
- examples/baidu_spider/settings.py +121 -0
- examples/baidu_spider/spiders/__init__.py +7 -0
- examples/baidu_spider/spiders/bai_du.py +61 -0
- examples/baidu_spider/spiders/miit.py +159 -0
- examples/baidu_spider/spiders/sina.py +79 -0
- tests/__init__.py +7 -7
- tests/test_proxy_health_check.py +32 -32
- tests/test_proxy_middleware_integration.py +136 -136
- tests/test_proxy_providers.py +56 -56
- tests/test_proxy_stats.py +19 -19
- tests/test_proxy_strategies.py +59 -59
- crawlo/utils/concurrency_manager.py +0 -125
- crawlo/utils/project.py +0 -197
- crawlo-1.1.0.dist-info/METADATA +0 -49
- crawlo-1.1.0.dist-info/RECORD +0 -97
- examples/gxb/__init__.py +0 -0
- examples/gxb/items.py +0 -36
- examples/gxb/run.py +0 -16
- examples/gxb/settings.py +0 -72
- examples/gxb/spider/__init__.py +0 -2
- examples/gxb/spider/miit_spider.py +0 -180
- examples/gxb/spider/telecom_device.py +0 -129
- {crawlo-1.1.0.dist-info → crawlo-1.1.1.dist-info}/WHEEL +0 -0
- {crawlo-1.1.0.dist-info → crawlo-1.1.1.dist-info}/entry_points.txt +0 -0
- {crawlo-1.1.0.dist-info → crawlo-1.1.1.dist-info}/top_level.txt +0 -0
crawlo/utils/pqueue.py
CHANGED
@@ -1,174 +1,174 @@

Lines 1-173 of the file are marked removed and re-added with identical content as rendered; only line 174, asyncio.run(demo()), is shown as unchanged context. Reconstructed file content:

# -*- coding:UTF-8 -*-
import sys
import asyncio
import warnings
from urllib.parse import urlparse
from asyncio import PriorityQueue
from redis.asyncio import from_url
from typing import Any, Optional, Dict, Annotated
from pydantic import (
    BaseModel,
    Field,
    model_validator
)

from crawlo import Request
from crawlo.settings.default_settings import REDIS_URL


class SpiderPriorityQueue(PriorityQueue):
    """Async priority queue with timeout support."""

    def __init__(self, maxsize: int = 0) -> None:
        """Initialize the queue; maxsize=0 means no size limit."""
        super().__init__(maxsize)

    async def get(self, timeout: float = 0.1) -> Optional[Request]:
        """
        Get an item from the queue asynchronously, with a timeout.

        Args:
            timeout: timeout in seconds, 0.1 by default

        Returns:
            The queue item (priority, value), or None on timeout
        """
        try:
            # Pick the timeout implementation based on the Python version
            if sys.version_info >= (3, 11):
                async with asyncio.timeout(timeout):
                    return await super().get()
            else:
                return await asyncio.wait_for(super().get(), timeout=timeout)
        except asyncio.TimeoutError:
            return None


class TaskModel(BaseModel):
    """Spider task data model (fully compatible with Pydantic V2)."""
    url: Annotated[str, Field(min_length=1, max_length=2000, examples=["https://example.com"])]
    meta: Dict[str, Any] = Field(default_factory=dict)
    priority: Annotated[int, Field(default=0, ge=0, le=10, description="0 = highest priority")]

    @classmethod
    def validate_url(cls, v: str) -> str:
        """Validate the URL format."""
        if not v.startswith(('http://', 'https://')):
            raise ValueError('URL must start with http:// or https://')

        parsed = urlparse(v)
        if not parsed.netloc:
            raise ValueError('URL is missing a valid domain')

        return v.strip()

    @model_validator(mode='after')
    def validate_priority_logic(self) -> 'TaskModel':
        """Cross-field validation example."""
        if 'admin' in self.url and self.priority > 5:
            self.priority = 5  # automatically lower the priority of admin pages
        return self


class DistributedPriorityQueue:
    def __init__(
        self,
        redis_url: str,
        queue_name: str = "spider_queue",
        max_connections: int = 10,
        health_check_interval: int = 30
    ):
        """
        Args:
            redis_url: redis://[:password]@host:port[/db]
            queue_name: name of the Redis sorted-set key
            max_connections: connection pool size
            health_check_interval: connection health check interval in seconds
        """
        self.redis = from_url(
            redis_url,
            max_connections=max_connections,
            health_check_interval=health_check_interval,
            socket_keepalive=True,
            decode_responses=True
        )
        self.queue_name = queue_name

    async def put(self, task: TaskModel) -> bool:
        """
        Add a task to the queue (uses Pydantic V2's model_dump_json).

        Args:
            task: a validated TaskModel instance

        Returns:
            bool: whether the task was added (Redis ZADD returns the number added)
        """
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=DeprecationWarning)
            task_str = task.model_dump_json()  # Pydantic V2 serialization
        return await self.redis.zadd(
            self.queue_name,
            {task_str: task.priority}
        ) > 0

    async def get(self, timeout: float = 1.0) -> Optional[TaskModel]:
        """
        Get the highest-priority task (validated automatically).

        Args:
            timeout: blocking timeout in seconds

        Returns:
            A TaskModel instance, or None (timeout / empty queue)
        """
        try:
            result = await self.redis.bzpopmax(
                self.queue_name,
                timeout=timeout
            )
            if result:
                _, task_str, _ = result
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore", category=DeprecationWarning)
                    return TaskModel.model_validate_json(task_str)  # Pydantic V2 deserialization
        except Exception as e:
            print(f"Failed to fetch task: {type(e).__name__}: {e}")
            return None

    async def aclose(self):
        """Close the connection safely."""
        await self.redis.aclose()

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.aclose()


# Usage example
async def demo():
    async with DistributedPriorityQueue(
        REDIS_URL,
        max_connections=20,
        health_check_interval=10
    ) as queue:
        # Add a task (validation is triggered automatically)
        task = TaskModel(
            url="https://example.com/1",
            priority=1,
            meta={"depth": 2}
        )

        if await queue.put(task):
            print(f"Task added: {task.url}")

        # Fetch a task
        if result := await queue.get(timeout=2.0):
            print(f"Fetched task: {result.url} (priority={result.priority})")
            print(f"Meta: {result.meta}")


if __name__ == "__main__":
    asyncio.run(demo())
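
For reference, a minimal usage sketch of the distributed queue above. The Redis URL redis://localhost:6379/0 and the key name pqueue_demo are assumptions for illustration, not values from the package. It demonstrates the ordering the class implements: put() stores each serialized task with score equal to task.priority, and get() pops with BZPOPMAX, which returns the member with the highest score first, so numerically larger priority values are dequeued first under this scheme.

# Minimal sketch; assumes a reachable local Redis instance and that
# crawlo.utils.pqueue exposes DistributedPriorityQueue and TaskModel as shown above.
import asyncio

from crawlo.utils.pqueue import DistributedPriorityQueue, TaskModel


async def sketch() -> None:
    async with DistributedPriorityQueue(
        "redis://localhost:6379/0",   # assumed local instance, not the package's REDIS_URL
        queue_name="pqueue_demo",     # hypothetical key used only for this sketch
    ) as queue:
        await queue.put(TaskModel(url="https://example.com/low", priority=1))
        await queue.put(TaskModel(url="https://example.com/high", priority=9))

        first = await queue.get(timeout=2.0)   # BZPOPMAX pops score 9 first -> .../high
        second = await queue.get(timeout=2.0)  # then score 1 -> .../low
        print(first and first.url, second and second.url)


if __name__ == "__main__":
    asyncio.run(sketch())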