crawlo-1.0.4-py3-none-any.whl → crawlo-1.0.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of crawlo might be problematic.
- crawlo/__init__.py +25 -9
- crawlo/__version__.py +1 -1
- crawlo/cli.py +41 -0
- crawlo/commands/__init__.py +10 -0
- crawlo/commands/genspider.py +111 -0
- crawlo/commands/run.py +149 -0
- crawlo/commands/startproject.py +101 -0
- crawlo/core/__init__.py +2 -2
- crawlo/core/engine.py +158 -158
- crawlo/core/processor.py +40 -40
- crawlo/core/scheduler.py +57 -57
- crawlo/crawler.py +219 -242
- crawlo/downloader/__init__.py +78 -78
- crawlo/downloader/aiohttp_downloader.py +200 -259
- crawlo/downloader/cffi_downloader.py +277 -0
- crawlo/downloader/httpx_downloader.py +246 -187
- crawlo/event.py +11 -11
- crawlo/exceptions.py +78 -64
- crawlo/extension/__init__.py +31 -31
- crawlo/extension/log_interval.py +49 -49
- crawlo/extension/log_stats.py +44 -44
- crawlo/extension/logging_extension.py +35 -0
- crawlo/filters/__init__.py +37 -37
- crawlo/filters/aioredis_filter.py +150 -150
- crawlo/filters/memory_filter.py +202 -202
- crawlo/items/__init__.py +22 -62
- crawlo/items/base.py +31 -0
- crawlo/items/fields.py +54 -0
- crawlo/items/items.py +105 -119
- crawlo/middleware/__init__.py +21 -21
- crawlo/middleware/default_header.py +32 -32
- crawlo/middleware/download_delay.py +28 -28
- crawlo/middleware/middleware_manager.py +135 -140
- crawlo/middleware/proxy.py +246 -0
- crawlo/middleware/request_ignore.py +30 -30
- crawlo/middleware/response_code.py +18 -18
- crawlo/middleware/response_filter.py +26 -26
- crawlo/middleware/retry.py +90 -90
- crawlo/network/__init__.py +7 -7
- crawlo/network/request.py +203 -204
- crawlo/network/response.py +166 -166
- crawlo/pipelines/__init__.py +13 -13
- crawlo/pipelines/console_pipeline.py +39 -39
- crawlo/pipelines/mongo_pipeline.py +116 -116
- crawlo/pipelines/mysql_batch_pipline.py +273 -134
- crawlo/pipelines/mysql_pipeline.py +195 -195
- crawlo/pipelines/pipeline_manager.py +56 -56
- crawlo/settings/__init__.py +7 -7
- crawlo/settings/default_settings.py +169 -94
- crawlo/settings/setting_manager.py +99 -99
- crawlo/spider/__init__.py +41 -36
- crawlo/stats_collector.py +59 -59
- crawlo/subscriber.py +106 -106
- crawlo/task_manager.py +27 -27
- crawlo/templates/crawlo.cfg.tmpl +11 -0
- crawlo/templates/project/__init__.py.tmpl +4 -0
- crawlo/templates/project/items.py.tmpl +18 -0
- crawlo/templates/project/middlewares.py.tmpl +76 -0
- crawlo/templates/project/pipelines.py.tmpl +64 -0
- crawlo/templates/project/settings.py.tmpl +54 -0
- crawlo/templates/project/spiders/__init__.py.tmpl +6 -0
- crawlo/templates/spider/spider.py.tmpl +32 -0
- crawlo/utils/__init__.py +7 -7
- crawlo/utils/concurrency_manager.py +124 -124
- crawlo/utils/date_tools.py +233 -177
- crawlo/utils/db_helper.py +344 -0
- crawlo/utils/func_tools.py +82 -82
- crawlo/utils/log.py +129 -39
- crawlo/utils/pqueue.py +173 -173
- crawlo/utils/project.py +199 -59
- crawlo/utils/request.py +267 -122
- crawlo/utils/spider_loader.py +63 -0
- crawlo/utils/system.py +11 -11
- crawlo/utils/tools.py +5 -303
- crawlo/utils/url.py +39 -39
- {crawlo-1.0.4.dist-info → crawlo-1.0.6.dist-info}/METADATA +49 -48
- crawlo-1.0.6.dist-info/RECORD +94 -0
- crawlo-1.0.6.dist-info/entry_points.txt +2 -0
- {crawlo-1.0.4.dist-info → crawlo-1.0.6.dist-info}/top_level.txt +1 -0
- examples/gxb/items.py +36 -0
- examples/gxb/run.py +16 -0
- examples/gxb/settings.py +72 -0
- examples/gxb/spider/__init__.py +0 -0
- examples/gxb/spider/miit_spider.py +180 -0
- examples/gxb/spider/telecom_device.py +129 -0
- tests/__init__.py +7 -7
- tests/test_proxy_health_check.py +33 -0
- tests/test_proxy_middleware_integration.py +137 -0
- tests/test_proxy_providers.py +57 -0
- tests/test_proxy_stats.py +20 -0
- tests/test_proxy_strategies.py +60 -0
- crawlo/downloader/playwright_downloader.py +0 -161
- crawlo/templates/item_template.tmpl +0 -22
- crawlo/templates/project_template/main.py +0 -33
- crawlo/templates/project_template/setting.py +0 -190
- crawlo/templates/spider_template.tmpl +0 -31
- crawlo-1.0.4.dist-info/RECORD +0 -79
- crawlo-1.0.4.dist-info/entry_points.txt +0 -2
- tests/baidu_spider/__init__.py +0 -7
- tests/baidu_spider/demo.py +0 -94
- tests/baidu_spider/items.py +0 -25
- tests/baidu_spider/middleware.py +0 -49
- tests/baidu_spider/pipeline.py +0 -55
- tests/baidu_spider/request_fingerprints.txt +0 -9
- tests/baidu_spider/run.py +0 -27
- tests/baidu_spider/settings.py +0 -80
- tests/baidu_spider/spiders/__init__.py +0 -7
- tests/baidu_spider/spiders/bai_du.py +0 -61
- tests/baidu_spider/spiders/sina.py +0 -79
- {crawlo-1.0.4.dist-info → crawlo-1.0.6.dist-info}/WHEEL +0 -0
- {crawlo/templates/project_template/items → examples}/__init__.py +0 -0
- {crawlo/templates/project_template/spiders → examples/gxb}/__init__.py +0 -0
crawlo/utils/pqueue.py
CHANGED
@@ -1,174 +1,174 @@ (the whole file was removed and re-added; both sides of the hunk carry the same content, shown once below)

```python
# -*- coding:UTF-8 -*-
import sys
import asyncio
import warnings
from urllib.parse import urlparse
from asyncio import PriorityQueue
from redis.asyncio import from_url
from typing import Any, Optional, Dict, Annotated
from pydantic import (
    BaseModel,
    Field,
    model_validator
)

from crawlo import Request
from crawlo.settings.default_settings import REDIS_URL


class SpiderPriorityQueue(PriorityQueue):
    """Async priority queue with timeout support."""

    def __init__(self, maxsize: int = 0) -> None:
        """Initialize the queue; a maxsize of 0 means no size limit."""
        super().__init__(maxsize)

    async def get(self, timeout: float = 0.1) -> Optional[Request]:
        """
        Asynchronously get an element from the queue, with a timeout.

        Args:
            timeout: timeout in seconds, default 0.1

        Returns:
            A queue element (priority, value), or None on timeout
        """
        try:
            # Pick the timeout implementation based on the Python version
            if sys.version_info >= (3, 11):
                async with asyncio.timeout(timeout):
                    return await super().get()
            else:
                return await asyncio.wait_for(super().get(), timeout=timeout)
        except asyncio.TimeoutError:
            return None


class TaskModel(BaseModel):
    """Spider task data model (fully compatible with Pydantic V2)."""
    url: Annotated[str, Field(min_length=1, max_length=2000, examples=["https://example.com"])]
    meta: Dict[str, Any] = Field(default_factory=dict)
    priority: Annotated[int, Field(default=0, ge=0, le=10, description="0 = highest priority")]

    @classmethod
    def validate_url(cls, v: str) -> str:
        """Validate the URL format (not registered with @field_validator, so it is not applied automatically)."""
        if not v.startswith(('http://', 'https://')):
            raise ValueError('URL must start with http:// or https://')

        parsed = urlparse(v)
        if not parsed.netloc:
            raise ValueError('URL is missing a valid domain')

        return v.strip()

    @model_validator(mode='after')
    def validate_priority_logic(self) -> 'TaskModel':
        """Cross-field validation example."""
        if 'admin' in self.url and self.priority > 5:
            self.priority = 5  # Automatically cap the priority of admin pages
        return self


class DistributedPriorityQueue:
    def __init__(
        self,
        redis_url: str,
        queue_name: str = "spider_queue",
        max_connections: int = 10,
        health_check_interval: int = 30
    ):
        """
        Args:
            redis_url: redis://[:password]@host:port[/db]
            queue_name: Redis sorted-set key name
            max_connections: connection pool size
            health_check_interval: connection health-check interval in seconds
        """
        self.redis = from_url(
            redis_url,
            max_connections=max_connections,
            health_check_interval=health_check_interval,
            socket_keepalive=True,
            decode_responses=True
        )
        self.queue_name = queue_name

    async def put(self, task: TaskModel) -> bool:
        """
        Add a task to the queue (uses Pydantic V2's model_dump_json).

        Args:
            task: a validated TaskModel instance

        Returns:
            bool: whether the task was added (Redis ZADD returns the number of added members)
        """
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=DeprecationWarning)
            task_str = task.model_dump_json()  # Pydantic V2 serialization
            return await self.redis.zadd(
                self.queue_name,
                {task_str: task.priority}
            ) > 0

    async def get(self, timeout: float = 1.0) -> Optional[TaskModel]:
        """
        Get the highest-priority task (validated automatically).

        Args:
            timeout: blocking timeout in seconds

        Returns:
            A TaskModel instance, or None (timeout / empty queue)
        """
        try:
            result = await self.redis.bzpopmax(
                self.queue_name,
                timeout=timeout
            )
            if result:
                _, task_str, _ = result
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore", category=DeprecationWarning)
                    return TaskModel.model_validate_json(task_str)  # Pydantic V2 deserialization
        except Exception as e:
            print(f"Failed to fetch task: {type(e).__name__}: {e}")
            return None

    async def aclose(self):
        """Close the connection safely."""
        await self.redis.aclose()

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.aclose()


# Usage example
async def demo():
    async with DistributedPriorityQueue(
        REDIS_URL,
        max_connections=20,
        health_check_interval=10
    ) as queue:
        # Add a task (validation is triggered automatically)
        task = TaskModel(
            url="https://example.com/1",
            priority=1,
            meta={"depth": 2}
        )

        if await queue.put(task):
            print(f"Task added: {task.url}")

        # Fetch a task
        if result := await queue.get(timeout=2.0):
            print(f"Got task: {result.url} (priority={result.priority})")
            print(f"Metadata: {result.meta}")


if __name__ == "__main__":
    asyncio.run(demo())
```
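For orientation, a brief usage sketch of `SpiderPriorityQueue` follows. It is not taken from the package: the `(priority, url)` tuple shape is an assumption about how callers enqueue items, and running it requires the package's `redis` and `pydantic` dependencies to be importable.

```python
import asyncio

from crawlo.utils.pqueue import SpiderPriorityQueue


async def main():
    queue = SpiderPriorityQueue()
    # Like asyncio.PriorityQueue, the smallest item is returned first,
    # so (priority, payload) tuples come out lowest-priority-number first.
    await queue.put((1, "https://example.com/high"))
    await queue.put((5, "https://example.com/low"))

    # get() returns None instead of raising once the queue has stayed
    # empty for the full timeout window.
    while (item := await queue.get(timeout=0.5)) is not None:
        priority, url = item
        print(priority, url)


asyncio.run(main())
```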
crawlo/utils/project.py
CHANGED
@@ -1,59 +1,199 @@ (the previous revision contained only the shebang line, the coding declaration, and blank lines; the new module follows)

```python
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Automatically discover the project and create a SettingManager instance.

This module is responsible for:
1. Searching upwards for the project root (via crawlo.cfg or settings.py)
2. Adding the project root to the Python path (sys.path)
3. Loading the specified settings module
4. Returning a fully configured SettingManager instance
"""
import os
import sys
import configparser
from importlib import import_module
from inspect import iscoroutinefunction
from typing import Callable, Optional

from crawlo.utils.log import get_logger
from crawlo.settings.setting_manager import SettingManager


logger = get_logger(__name__)


def _find_project_root(start_path: str = '.') -> Optional[str]:
    """
    Starting from the given path, search upwards through parent directories for the project root.
    Search criteria:
    1. Prefer a directory containing a 'crawlo.cfg' file.
    2. Otherwise, look for a 'settings.py' that lives inside a Python package (a directory containing __init__.py).

    Args:
        start_path (str): the path to start searching from, defaults to the current working directory '.'.

    Returns:
        Optional[str]: the absolute path of the project root, or None if it was not found.
    """
    path = os.path.abspath(start_path)
    logger.info(f"Searching upwards for the project root, starting at: {path}")

    while True:
        # 1. Check for a crawlo.cfg file
        cfg_file = os.path.join(path, 'crawlo.cfg')
        if os.path.isfile(cfg_file):
            logger.info(f"Found 'crawlo.cfg' at {path}; treating it as the project root.")
            return path

        # 2. Check for a settings.py file that lives inside a Python package
        settings_file = os.path.join(path, 'settings.py')
        if os.path.isfile(settings_file):
            init_file = os.path.join(path, '__init__.py')
            if os.path.isfile(init_file):
                logger.info(f"Found 'settings.py' at {path}; treating it as the project root.")
                return path
            else:
                logger.debug(f"Found 'settings.py' at {path} but no '__init__.py'; ignoring.")

        # Move up to the parent directory
        parent = os.path.dirname(path)
        if parent == path:
            # Reached the filesystem root
            break
        path = parent

    logger.warning("Finished searching upwards; no project root found.")
    return None


def _get_settings_module_from_cfg(cfg_path: str) -> str:
    """
    Read the settings module path from a crawlo.cfg configuration file.

    Args:
        cfg_path (str): full path to the crawlo.cfg file.

    Returns:
        str: the import path of the settings module, e.g. 'myproject.settings'.

    Raises:
        RuntimeError: if reading the file or parsing the configuration fails.
    """
    logger.info(f"Reading configuration file: {cfg_path}")
    config = configparser.ConfigParser()
    try:
        config.read(cfg_path, encoding='utf-8')
        if config.has_section('settings') and config.has_option('settings', 'default'):
            module_path = config.get('settings', 'default')
            logger.info(f"Read settings module path from 'crawlo.cfg': {module_path}")
            return module_path
        else:
            error_msg = f"Configuration file '{cfg_path}' is missing the '[settings]' section or the 'default' option."
            logger.error(error_msg)
            raise RuntimeError(error_msg)
    except (configparser.Error, OSError) as e:
        error_msg = f"Error reading or parsing configuration file '{cfg_path}': {e}"
        logger.error(error_msg)
        raise RuntimeError(error_msg)


def get_settings(custom_settings=None):
    """
    Main entry point for obtaining a settings manager instance.
    Discovers the project, loads the configuration, and returns a configured SettingManager.

    Args:
        custom_settings (dict, optional): runtime settings that override values of the same name in settings.py.

    Returns:
        SettingManager: an instance with all configuration loaded.

    Raises:
        RuntimeError: if no project or configuration file can be found.
        ImportError: if the specified settings module cannot be imported.
    """
    logger.info("Initializing the settings manager...")

    # 1. Discover the project root
    project_root = _find_project_root()
    if not project_root:
        error_msg = "No Crawlo project found. Make sure you run inside a project directory containing 'crawlo.cfg' or 'settings.py'."
        logger.error(error_msg)
        raise RuntimeError(error_msg)

    logger.info(f"Project root determined: {project_root}")

    # 2. Determine the import path of the settings module
    settings_module_path = None

    # Prefer reading it from crawlo.cfg
    cfg_file = os.path.join(project_root, 'crawlo.cfg')
    if os.path.isfile(cfg_file):
        settings_module_path = _get_settings_module_from_cfg(cfg_file)
    else:
        logger.info("No 'crawlo.cfg' found; inferring the settings module path...")
        # Inference: <project directory name>.settings
        project_name = os.path.basename(project_root)
        settings_module_path = f"{project_name}.settings"
        logger.info(f"Inferred settings module path: {settings_module_path}")

    # 3. Add the project root to the Python path so the import can succeed
    if project_root not in sys.path:
        sys.path.insert(0, project_root)
        logger.info(f"Added project root '{project_root}' to the Python path.")
    else:
        logger.debug(f"Project root '{project_root}' is already on the Python path.")

    # 4. Create the SettingManager and load the configuration
    logger.info(f"Loading settings module: {settings_module_path}")
    settings = SettingManager()

    try:
        # This triggers SettingManager.set_settings(), which loads all upper-case constants from the module
        settings.set_settings(settings_module_path)
        logger.info("Settings module loaded successfully.")
    except Exception as e:
        error_msg = f"Failed to load settings module '{settings_module_path}': {e}"
        logger.error(error_msg)
        raise ImportError(error_msg)

    # 5. Apply runtime custom settings
    if custom_settings:
        logger.info(f"Applying runtime custom settings: {custom_settings}")
        settings.update_attributes(custom_settings)
        logger.info("Runtime custom settings applied.")

    logger.info("Settings manager initialization complete.")
    return settings


def load_class(_path):
    if not isinstance(_path, str):
        if callable(_path):
            return _path
        else:
            raise TypeError(f"args expect str or object, got {_path}")

    module_name, class_name = _path.rsplit('.', 1)
    module = import_module(module_name)

    try:
        cls = getattr(module, class_name)
    except AttributeError:
        raise NameError(f"Module {module_name!r} has no class named {class_name!r}")
    return cls


def merge_settings(spider, settings):
    spider_name = getattr(spider, 'name', 'UnknownSpider')
    if hasattr(spider, 'custom_settings'):
        custom_settings = getattr(spider, 'custom_settings')
        settings.update_attributes(custom_settings)
    else:
        logger.debug(f"Spider '{spider_name}' has no custom_settings; skipping merge")  # log the skip


async def common_call(func: Callable, *args, **kwargs):
    if iscoroutinefunction(func):
        return await func(*args, **kwargs)
    else:
        return func(*args, **kwargs)
```