crawlo 1.0.5__py3-none-any.whl → 1.0.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of crawlo might be problematic.
- crawlo/__version__.py +1 -1
- crawlo/cli.py +41 -0
- crawlo/commands/__init__.py +10 -0
- crawlo/commands/genspider.py +111 -0
- crawlo/commands/run.py +149 -0
- crawlo/commands/startproject.py +101 -0
- crawlo/crawler.py +1 -206
- crawlo/exceptions.py +5 -0
- crawlo/items/__init__.py +18 -58
- crawlo/items/base.py +31 -0
- crawlo/items/fields.py +54 -0
- crawlo/items/items.py +10 -20
- crawlo/settings/default_settings.py +1 -1
- crawlo/templates/crawlo.cfg.tmpl +11 -0
- crawlo/templates/project/__init__.py.tmpl +4 -0
- crawlo/templates/project/items.py.tmpl +18 -0
- crawlo/templates/project/middlewares.py.tmpl +76 -0
- crawlo/templates/project/pipelines.py.tmpl +64 -0
- crawlo/templates/project/settings.py.tmpl +54 -0
- crawlo/templates/project/spiders/__init__.py.tmpl +6 -0
- crawlo/templates/spider/spider.py.tmpl +32 -0
- crawlo/utils/project.py +159 -19
- crawlo/utils/spider_loader.py +63 -0
- {crawlo-1.0.5.dist-info → crawlo-1.0.6.dist-info}/METADATA +1 -1
- {crawlo-1.0.5.dist-info → crawlo-1.0.6.dist-info}/RECORD +32 -22
- crawlo-1.0.6.dist-info/entry_points.txt +2 -0
- examples/gxb/items.py +1 -1
- examples/gxb/run.py +2 -1
- examples/gxb/settings.py +2 -1
- examples/gxb/spider/{telecom_device_licenses.py → telecom_device.py} +1 -1
- crawlo/templates/item_template.tmpl +0 -22
- crawlo/templates/project_template/items/__init__.py +0 -0
- crawlo/templates/project_template/main.py +0 -33
- crawlo/templates/project_template/setting.py +0 -190
- crawlo/templates/project_template/spiders/__init__.py +0 -0
- crawlo/templates/spider_template.tmpl +0 -31
- crawlo-1.0.5.dist-info/entry_points.txt +0 -2
- {crawlo-1.0.5.dist-info → crawlo-1.0.6.dist-info}/WHEEL +0 -0
- {crawlo-1.0.5.dist-info → crawlo-1.0.6.dist-info}/top_level.txt +0 -0
crawlo/__version__.py CHANGED
@@ -1 +1 @@
-__version__ = "1.0.5"
+__version__ = "1.0.6"
crawlo/cli.py ADDED
@@ -0,0 +1,41 @@
+# crawlo/cli.py
+# !/usr/bin/python
+# -*- coding: UTF-8 -*-
+import sys
+import argparse
+from crawlo.commands import get_commands
+
+
+def main():
+    # Gather all available commands
+    commands = get_commands()
+
+    parser = argparse.ArgumentParser(
+        description="Crawlo: A lightweight web crawler framework.",
+        usage="crawlo <command> [options]"
+    )
+    parser.add_argument('command', help='Available commands: ' + ', '.join(commands.keys()))
+    # Note: no concrete arguments are added here; each subcommand parses its own
+
+    # Parse only the command name
+    args, unknown = parser.parse_known_args()
+
+    if args.command not in commands:
+        print(f"Unknown command: {args.command}")
+        print(f"Available commands: {', '.join(commands.keys())}")
+        sys.exit(1)
+
+    # Dynamically import and execute the command
+    try:
+        module = __import__(commands[args.command], fromlist=['main'])
+        sys.exit(module.main(unknown))
+    except ImportError as e:
+        print(f"Failed to load command '{args.command}': {e}")
+        sys.exit(1)
+    except Exception as e:
+        print(f"Command '{args.command}' failed: {e}")
+        sys.exit(1)
+
+
+if __name__ == '__main__':
+    main()
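The CLI dispatches through get_commands(), whose body (crawlo/commands/__init__.py, +10 lines) is not expanded in this diff. The dispatch logic above implies it returns a mapping from command name to the module path exposing a main(args) entry point; a minimal sketch consistent with that contract (the dict contents are an assumption based on the command modules added in this release):

# Hypothetical sketch of crawlo/commands/__init__.py (not shown in this diff);
# only the name -> module-path contract is implied by cli.py.
def get_commands():
    return {
        'startproject': 'crawlo.commands.startproject',
        'genspider': 'crawlo.commands.genspider',
        'run': 'crawlo.commands.run',
    }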
crawlo/commands/genspider.py ADDED
@@ -0,0 +1,111 @@
+import os
+import sys
+from pathlib import Path
+import configparser
+import importlib
+
+TEMPLATES_DIR = Path(__file__).parent.parent / 'templates'
+
+
+def _render_template(tmpl_path, context):
+    """Read a template file and replace {{key}} placeholders with values from context"""
+    with open(tmpl_path, 'r', encoding='utf-8') as f:
+        content = f.read()
+    for key, value in context.items():
+        content = content.replace(f'{{{{{key}}}}}', str(value))
+    return content
+
+
+def main(args):
+    if len(args) < 2:
+        print("Usage: crawlo genspider <spider_name> <domain>")
+        return 1
+
+    spider_name = args[0]
+    domain = args[1]
+
+    # Find the project root directory
+    project_root = None
+    current = Path.cwd()
+    while True:
+        cfg_file = current / 'crawlo.cfg'
+        if cfg_file.exists():
+            project_root = current
+            break
+        parent = current.parent
+        if parent == current:
+            break
+        current = parent
+
+    if not project_root:
+        print("Error: Not a crawlo project. crawlo.cfg not found.")
+        return 1
+
+    # Add the project root to sys.path
+    if str(project_root) not in sys.path:
+        sys.path.insert(0, str(project_root))
+
+    # Read the settings module from crawlo.cfg to get the project package name
+    config = configparser.ConfigParser()
+    try:
+        config.read(cfg_file, encoding='utf-8')
+        settings_module = config.get('settings', 'default')
+        project_package = settings_module.split('.')[0]  # e.g., myproject.settings -> myproject
+    except Exception as e:
+        print(f"Error reading crawlo.cfg: {e}")
+        return 1
+
+    # Determine the path of the items module
+    items_module_path = f"{project_package}.items"
+
+    # Try to import the items module
+    try:
+        items_module = importlib.import_module(items_module_path)
+        # Get all classes in the module whose names start with an uppercase letter
+        item_classes = [cls for cls in items_module.__dict__.values()
+                        if isinstance(cls, type) and cls.__name__.isupper()]
+
+        # If classes were found, use the first one as the default
+        if item_classes:
+            default_item_class = item_classes[0].__name__
+        else:
+            default_item_class = "ExampleItem"  # Fall back to the example
+    except ImportError as e:
+        print(f"Error importing items module '{items_module_path}': {e}")
+        default_item_class = "ExampleItem"
+
+    # Create the spider file
+    spiders_dir = project_root / project_package / 'spiders'
+    if not spiders_dir.exists():
+        spiders_dir.mkdir(parents=True)
+
+    spider_file = spiders_dir / f'{spider_name}.py'
+    if spider_file.exists():
+        print(f"Error: Spider '{spider_name}' already exists.")
+        return 1
+
+    # ✅ Corrected template path
+    tmpl_path = TEMPLATES_DIR / 'spider' / 'spider.py.tmpl'
+
+    if not tmpl_path.exists():
+        print(f"Error: Template file not found at {tmpl_path}")
+        return 1
+
+    # ✅ Generate the proper class name
+    class_name = f"{spider_name.capitalize()}Spider"
+
+    context = {
+        'spider_name': spider_name,
+        'domain': domain,
+        'project_name': project_package,
+        'item_class': default_item_class,
+        'class_name': class_name  # ✅ Add the prepared class name
+    }
+
+    content = _render_template(tmpl_path, context)
+
+    with open(spider_file, 'w', encoding='utf-8') as f:
+        f.write(content)
+
+    print(f"Spider '{spider_name}' created in {spider_file}")
+    return 0
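The doubled braces in _render_template are easy to misread: inside an f-string, "{{" and "}}" escape to literal braces, so f'{{{{{key}}}}}' evaluates to the placeholder text {{key}}, which is then replaced. A standalone illustration of the substitution (the template text here is invented for the demo):

# Demo of the {{key}} substitution performed by _render_template.
template = "class {{class_name}}(Spider):\n    name = '{{spider_name}}'"
context = {'class_name': 'ExampleSpider', 'spider_name': 'example'}
for key, value in context.items():
    # f'{{{{{key}}}}}' renders to the literal string '{{class_name}}' etc.
    template = template.replace(f'{{{{{key}}}}}', str(value))
print(template)
# class ExampleSpider(Spider):
#     name = 'example'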
crawlo/commands/run.py ADDED
@@ -0,0 +1,149 @@
+# crawlo/commands/run.py
+import asyncio
+import importlib
+import sys
+from pathlib import Path
+import configparser
+
+from crawlo.crawler import CrawlerProcess
+from crawlo.utils.project import get_settings
+from crawlo.utils.log import get_logger
+from crawlo.utils.spider_loader import SpiderLoader
+
+logger = get_logger(__name__)
+
+
+def main(args):
+    """
+    Main function to run the specified spider.
+    Usage: crawlo run <spider_name>
+    """
+    if len(args) < 1:
+        print("Usage: crawlo run <spider_name>")
+        print("Example: crawlo run baidu")
+        return 1
+
+    spider_name = args[0]
+
+    try:
+        # 1. Get the project root directory
+        project_root = get_settings()
+
+        # Add the project root to the Python path
+        if str(project_root) not in sys.path:
+            sys.path.insert(0, str(project_root))
+
+        # 2. Read the config file to get the project package name
+        cfg_file = project_root / 'crawlo.cfg'
+        if not cfg_file.exists():
+            print(f"❌ Error: crawlo.cfg not found in {project_root}")
+            return 1
+
+        config = configparser.ConfigParser()
+        config.read(cfg_file, encoding='utf-8')
+
+        if not config.has_section('settings') or not config.has_option('settings', 'default'):
+            print("❌ Error: Missing [settings] section or 'default' option in crawlo.cfg")
+            return 1
+
+        settings_module = config.get('settings', 'default')
+        project_package = settings_module.split('.')[0]
+
+        # 3. Find and load the Spider with the given name
+        spider_class = find_spider_by_name(project_package, spider_name)
+        if spider_class is None:
+            return 1
+
+        # 4. Create a CrawlerProcess and run the single spider
+        settings = get_settings()
+        process = CrawlerProcess(settings)
+
+        print(f"🚀 Starting spider: {spider_class.name}")
+        print(f"📁 Project: {project_package}")
+        print(f"🕷️ Class: {spider_class.__name__}")
+        print("-" * 50)
+
+        # Run the single spider
+        asyncio.run(process.crawl(spider_class))
+
+        print("-" * 50)
+        print("✅ Spider completed successfully!")
+        return 0
+
+    except Exception as e:
+        print(f"❌ Error running spider: {e}")
+        import traceback
+        traceback.print_exc()
+        return 1
+
+
+def find_spider_by_name(project_package: str, target_spider_name: str):
+    """Look up a spider via SpiderLoader"""
+    loader = SpiderLoader(project_package)
+    spider_class = loader.load(target_spider_name)
+
+    if spider_class is None:
+        print(f"❌ Error: Spider with name '{target_spider_name}' not found")
+        print("💡 Available spiders:")
+        available_spiders = loader.list()
+        for spider_name in available_spiders:
+            print(f"  - {spider_name}")
+        return None
+
+    return spider_class
+
+
+def list_available_spiders(project_package: str):
+    """
+    List all available spiders
+    """
+    spiders_dir = Path.cwd() / project_package / 'spiders'
+    if not spiders_dir.exists():
+        print("  No spiders directory found")
+        return
+
+    spider_count = 0
+    for py_file in spiders_dir.glob("*.py"):
+        if py_file.name.startswith('_'):
+            continue
+
+        module_name = py_file.stem
+        spider_module_path = f"{project_package}.spiders.{module_name}"
+
+        try:
+            module = importlib.import_module(spider_module_path)
+        except ImportError:
+            continue
+
+        # Find all Spider subclasses in the module
+        from crawlo.spider import Spider
+        for attr_name in dir(module):
+            attr_value = getattr(module, attr_name)
+            if (isinstance(attr_value, type) and
+                    issubclass(attr_value, Spider) and
+                    attr_value != Spider and
+                    hasattr(attr_value, 'name')):
+                print(f"  - {attr_value.name} (class: {attr_value.__name__}, module: {module_name})")
+                spider_count += 1
+
+    if spider_count == 0:
+        print("  No spiders found")
+
+
+def run_spider_by_name(spider_name: str, project_root: Path = None):
+    """
+    Run a spider by name directly from code
+    """
+    if project_root:
+        if str(project_root) not in sys.path:
+            sys.path.insert(0, str(project_root))
+
+    args = [spider_name]
+    return main(args)
+
+
+if __name__ == '__main__':
+    # Allow running directly: python -m crawlo.commands.run <spider_name>
+    import sys
+
+    sys.exit(main(sys.argv[1:]))
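Besides the CLI entry point, run.py exposes run_spider_by_name for programmatic use. A hedged usage sketch (the spider name and project path are placeholders, and the call assumes it is executed where get_settings() can locate crawlo.cfg):

# Hypothetical caller script; 'example' and the path are placeholder values.
from pathlib import Path
from crawlo.commands.run import run_spider_by_name

exit_code = run_spider_by_name('example', project_root=Path('/path/to/myproject'))
print(f"spider exited with code {exit_code}")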
crawlo/commands/startproject.py ADDED
@@ -0,0 +1,101 @@
+# crawlo/commands/startproject.py
+import os
+import shutil
+from pathlib import Path
+
+TEMPLATES_DIR = Path(__file__).parent.parent / 'templates'
+
+
+def _render_template(tmpl_path, context):
+    """Read a template file and replace {{key}} placeholders with values from context"""
+    with open(tmpl_path, 'r', encoding='utf-8') as f:
+        content = f.read()
+    for key, value in context.items():
+        content = content.replace(f'{{{{{key}}}}}', str(value))
+    return content
+
+
+def _copytree_with_templates(src, dst, context):
+    """
+    Recursively copy a directory tree: render .tmpl files (dropping the .tmpl suffix), copy everything else as-is.
+    """
+    src_path = Path(src)
+    dst_path = Path(dst)
+    dst_path.mkdir(parents=True, exist_ok=True)
+
+    for item in src_path.rglob('*'):
+        rel_path = item.relative_to(src_path)
+        dst_item = dst_path / rel_path
+
+        if item.is_dir():
+            # Create the target directory
+            dst_item.mkdir(parents=True, exist_ok=True)
+        else:
+            if item.suffix == '.tmpl':
+                # Render the template file and strip the .tmpl suffix
+                rendered_content = _render_template(item, context)
+                final_dst = dst_item.with_suffix('')  # Drop .tmpl
+                final_dst.parent.mkdir(parents=True, exist_ok=True)  # Ensure the parent directory exists
+                with open(final_dst, 'w', encoding='utf-8') as f:
+                    f.write(rendered_content)
+            else:
+                # Ordinary file, copy directly
+                shutil.copy2(item, dst_item)
+
+
+def main(args):
+    if len(args) != 1:
+        print("Usage: crawlo startproject <project_name>")
+        return 1
+
+    project_name = args[0]
+    project_dir = Path(project_name)
+
+    if project_dir.exists():
+        print(f"Error: Directory '{project_dir}' already exists.")
+        return 1
+
+    context = {'project_name': project_name}
+    template_dir = TEMPLATES_DIR / 'project'
+
+    try:
+        # 1. Create the project root directory
+        project_dir.mkdir()
+
+        # 2. Handle crawlo.cfg.tmpl: render it separately into the project root
+        cfg_template = TEMPLATES_DIR / 'crawlo.cfg.tmpl'  # ✅ Use the template under templates/
+        if cfg_template.exists():
+            cfg_content = _render_template(cfg_template, context)
+            (project_dir / 'crawlo.cfg').write_text(cfg_content, encoding='utf-8')
+        else:
+            print("Warning: crawlo.cfg.tmpl not found in templates.")
+
+        # 3. Copy all other template files into the project package (project_dir / project_name)
+        package_dir = project_dir / project_name
+        # This copies __init__.py.tmpl, items.py.tmpl, settings.py.tmpl, spiders/, etc.
+        # and renders them into .py files
+        _copytree_with_templates(template_dir, package_dir, context)
+
+        # 4. Create the logs directory
+        (project_dir / 'logs').mkdir(exist_ok=True)
+
+        print(f"""
+✔ Project '{project_name}' created successfully!
+
+Enter the project directory:
+    cd {project_name}
+
+Create a spider:
+    crawlo genspider example example.com
+
+Run the spider:
+    crawlo run example
+""")
+        return 0
+
+    except Exception as e:
+        print(f"Error creating project: {e}")
+        # If something went wrong, try to clean up the created directory
+        if project_dir.exists():
+            shutil.rmtree(project_dir, ignore_errors=True)
+        return 1
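Given the template files listed at the top of this diff, a run of "crawlo startproject myproject" (the project name is a placeholder) should produce roughly the following layout: crawlo.cfg rendered into the root, the package templates rendered with their .tmpl suffixes stripped, and an empty logs/ directory:

myproject/
├── crawlo.cfg
├── logs/
└── myproject/
    ├── __init__.py
    ├── items.py
    ├── middlewares.py
    ├── pipelines.py
    ├── settings.py
    └── spiders/
        └── __init__.py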
crawlo/crawler.py CHANGED
@@ -216,209 +216,4 @@ class CrawlerProcess:
             return get_settings()
         except Exception as e:
             logger.warning(f"Failed to load default settings: {e}")
-            return SettingManager()
-
-
-
-# #!/usr/bin/python
-# # -*- coding:UTF-8 -*
-# import signal
-# import asyncio
-# from typing import Final, Set, Optional
-#
-# from crawlo.spider import Spider
-# from crawlo.core.engine import Engine
-# from crawlo.utils.log import get_logger
-# from crawlo.subscriber import Subscriber
-# from crawlo.extension import ExtensionManager
-# from crawlo.exceptions import SpiderTypeError
-# from crawlo.stats_collector import StatsCollector
-# from crawlo.event import spider_opened, spider_closed
-# from crawlo.settings.setting_manager import SettingManager
-# from crawlo.utils.project import merge_settings, get_settings
-#
-# logger = get_logger(__name__)
-#
-#
-# class Crawler:
-#
-#     def __init__(self, spider_cls, settings):
-#         self.spider_cls = spider_cls
-#         self.spider: Optional[Spider] = None
-#         self.engine: Optional[Engine] = None
-#         self.stats: Optional[StatsCollector] = None
-#         self.subscriber: Optional[Subscriber] = None
-#         self.extension: Optional[ExtensionManager] = None
-#         self.settings: SettingManager = settings.copy()
-#
-#     async def crawl(self):
-#         self.subscriber = self._create_subscriber()
-#         self.spider = self._create_spider()
-#         self.engine = self._create_engine()
-#         self.stats = self._create_stats()
-#         self.extension = self._create_extension()
-#
-#         await self.engine.start_spider(self.spider)
-#
-#     @staticmethod
-#     def _create_subscriber():
-#         return Subscriber()
-#
-#     def _create_spider(self) -> Spider:
-#         spider = self.spider_cls.create_instance(self)
-#
-#         # --- Critical attribute checks ---
-#         # 1. Check name
-#         if not getattr(spider, 'name', None):
-#             raise AttributeError(f"Spider class '{self.spider_cls.__name__}' must have a 'name' attribute.")
-#
-#         # 2. Check that start_requests is callable
-#         if not callable(getattr(spider, 'start_requests', None)):
-#             raise AttributeError(f"Spider '{spider.name}' must have a callable 'start_requests' method.")
-#
-#         # 3. Check the type of start_urls
-#         start_urls = getattr(spider, 'start_urls', [])
-#         if isinstance(start_urls, str):
-#             raise TypeError(f"'{spider.name}.start_urls' must be a list or tuple, not a string.")
-#
-#         # --- Logging hints ---
-#         # Remind the user to define a parse method
-#         if not callable(getattr(spider, 'parse', None)):
-#             logger.warning(f"Spider '{spider.name}' lacks a 'parse' method. Ensure all Requests have callbacks.")
-#
-#         self._set_spider(spider)
-#         return spider
-#
-#     def _create_engine(self) -> Engine:
-#         engine = Engine(self)
-#         engine.engine_start()
-#         return engine
-#
-#     def _create_stats(self) -> StatsCollector:
-#         stats = StatsCollector(self)
-#         return stats
-#
-#     def _create_extension(self) -> ExtensionManager:
-#         extension = ExtensionManager.create_instance(self)
-#         return extension
-#
-#     def _set_spider(self, spider):
-#         self.subscriber.subscribe(spider.spider_opened, event=spider_opened)
-#         self.subscriber.subscribe(spider.spider_closed, event=spider_closed)
-#         merge_settings(spider, self.settings)
-#
-#     async def close(self, reason='finished') -> None:
-#         await asyncio.create_task(self.subscriber.notify(spider_closed))
-#         self.stats.close_spider(spider=self.spider, reason=reason)
-#
-#
-# class CrawlerProcess:
-#     """Crawler process with cross-platform dynamic concurrency control and fine-grained logging"""
-#
-#     def __init__(self, settings=None, max_concurrency: Optional[int] = None, batch_size: int = 10):
-#         self.crawlers: Final[Set] = set()
-#         self._active_spiders: Final[Set] = set()
-#         self.settings = settings or self._get_default_settings()
-#         self.batch_size = batch_size
-#
-#         # Prefer the dedicated setting, fall back to CONCURRENCY, then to the default
-#         self.max_concurrency = (
-#             max_concurrency or
-#             self.settings.get('MAX_RUNNING_SPIDERS') or
-#             self.settings.get('CONCURRENCY', 5)
-#         )
-#         self.semaphore = asyncio.Semaphore(self.max_concurrency)
-#
-#         signal.signal(signal.SIGINT, self._shutdown)
-#         logger.debug(f"Initialized crawler process, max concurrency: {self.max_concurrency}")
-#
-#     async def crawl(self, spiders):
-#         """Batch-process one or more spiders, with tuned log output"""
-#         if not spiders:
-#             raise ValueError("At least one spider class is required")
-#
-#         # Normalize to a list
-#         if isinstance(spiders, type) and issubclass(spiders, Spider):
-#             spiders = [spiders]
-#         elif isinstance(spiders, (list, tuple)):
-#             spiders = list(spiders)
-#         else:
-#             raise TypeError("spiders must be a spider class or a list/tuple of spider classes")
-#
-#         # Sort spiders by class name (ascending)
-#         spiders.sort(key=lambda x: x.__name__.lower())
-#
-#         if len(spiders) == 1:
-#             logger.info(f"Starting spider: {spiders[0].__name__}")
-#         else:
-#             logger.info(f"Starting {len(spiders)} spiders, sorted by name and processed in batches")
-#
-#         batches = [spiders[i:i + self.batch_size] for i in range(0, len(spiders), self.batch_size)]
-#
-#         for batch_idx, batch in enumerate(batches):
-#             batch_tasks = set()
-#
-#             for spider_cls in batch:
-#                 crawler = self._create_crawler(spider_cls)
-#                 self.crawlers.add(crawler)
-#
-#                 await self.semaphore.acquire()
-#                 task = asyncio.create_task(self._run_crawler_with_semaphore(crawler))
-#                 batch_tasks.add(task)
-#                 self._active_spiders.add(task)
-#
-#             if len(spiders) > 1:  # Only show batch info when running multiple spiders
-#                 logger.info(f"Starting batch {batch_idx + 1}/{len(batches)}, {len(batch)} spiders")
-#
-#             await asyncio.gather(*batch_tasks)
-#
-#             if len(spiders) > 1:  # Only show batch completion when running multiple spiders
-#                 logger.info(f"Batch {batch_idx + 1} finished")
-#
-#     async def _run_crawler_with_semaphore(self, crawler):
-#         """Run a crawler under the concurrency semaphore"""
-#         try:
-#             await crawler.crawl()
-#         finally:
-#             self.semaphore.release()  # Always release the slot
-#
-#     async def start(self):
-#         """Start all crawler tasks"""
-#         if self._active_spiders:
-#             logger.info(f"Starting {len(self._active_spiders)} crawler tasks; computed device concurrency limit: {self.max_concurrency}")
-#             await asyncio.gather(*self._active_spiders)
-#
-#     def _create_crawler(self, spider_cls) -> Crawler:
-#         """Create a crawler instance"""
-#         if isinstance(spider_cls, str):
-#             raise SpiderTypeError(f"{type(self)}.crawl args: String is not supported.")
-#         crawler: Crawler = Crawler(spider_cls, self.settings)
-#         return crawler
-#
-#     def _shutdown(self, _signum, _frame):
-#         """Gracefully shut down all crawlers"""
-#         logger.warning(f"Received shutdown signal, gracefully closing {len(self.crawlers)} crawlers...")
-#         for crawler in self.crawlers:
-#             if crawler.engine:
-#                 crawler.engine.running = False
-#                 crawler.engine.normal = False
-#                 crawler.stats.close_spider(crawler.spider, 'shutdown signal')
-#
-#         # Wait for all tasks to finish
-#         asyncio.create_task(self._wait_for_tasks())
-#
-#     async def _wait_for_tasks(self):
-#         """Wait for all active tasks to complete"""
-#         pending = [task for task in self._active_spiders if not task.done()]
-#         if pending:
-#             logger.info(f"Waiting for {len(pending)} active tasks to finish...")
-#             await asyncio.gather(*pending)
-#         logger.info("All crawlers shut down gracefully")
-#
-#     @classmethod
-#     def _get_default_settings(cls):
-#         """Automatically load the framework's default settings"""
-#         try:
-#             return get_settings()
-#         except ImportError:
-#             return {}
+            return SettingManager()
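The surviving CrawlerProcess API is exercised by the new crawlo/commands/run.py above; a minimal programmatic equivalent, with the project package and spider class as placeholders:

import asyncio

from crawlo.crawler import CrawlerProcess
from crawlo.utils.project import get_settings
from myproject.spiders.example import ExampleSpider  # placeholder import

settings = get_settings()
process = CrawlerProcess(settings)
asyncio.run(process.crawl(ExampleSpider))  # mirrors run.py's invocation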