crawlo 1.1.0__py3-none-any.whl → 1.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crawlo might be problematic.

Files changed (120)
  1. crawlo/__init__.py +34 -24
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +40 -40
  4. crawlo/commands/__init__.py +13 -13
  5. crawlo/commands/check.py +594 -155
  6. crawlo/commands/genspider.py +152 -111
  7. crawlo/commands/list.py +156 -119
  8. crawlo/commands/run.py +285 -170
  9. crawlo/commands/startproject.py +196 -101
  10. crawlo/commands/stats.py +188 -167
  11. crawlo/commands/utils.py +187 -0
  12. crawlo/config.py +280 -0
  13. crawlo/core/__init__.py +2 -2
  14. crawlo/core/engine.py +171 -158
  15. crawlo/core/enhanced_engine.py +190 -0
  16. crawlo/core/processor.py +40 -40
  17. crawlo/core/scheduler.py +162 -57
  18. crawlo/crawler.py +1028 -493
  19. crawlo/downloader/__init__.py +242 -78
  20. crawlo/downloader/aiohttp_downloader.py +212 -199
  21. crawlo/downloader/cffi_downloader.py +252 -277
  22. crawlo/downloader/httpx_downloader.py +257 -246
  23. crawlo/event.py +11 -11
  24. crawlo/exceptions.py +78 -78
  25. crawlo/extension/__init__.py +31 -31
  26. crawlo/extension/log_interval.py +49 -49
  27. crawlo/extension/log_stats.py +44 -44
  28. crawlo/extension/logging_extension.py +34 -34
  29. crawlo/filters/__init__.py +154 -37
  30. crawlo/filters/aioredis_filter.py +242 -150
  31. crawlo/filters/memory_filter.py +269 -202
  32. crawlo/items/__init__.py +23 -23
  33. crawlo/items/base.py +21 -21
  34. crawlo/items/fields.py +53 -53
  35. crawlo/items/items.py +104 -104
  36. crawlo/middleware/__init__.py +21 -21
  37. crawlo/middleware/default_header.py +32 -32
  38. crawlo/middleware/download_delay.py +28 -28
  39. crawlo/middleware/middleware_manager.py +135 -135
  40. crawlo/middleware/proxy.py +248 -245
  41. crawlo/middleware/request_ignore.py +30 -30
  42. crawlo/middleware/response_code.py +18 -18
  43. crawlo/middleware/response_filter.py +26 -26
  44. crawlo/middleware/retry.py +125 -90
  45. crawlo/mode_manager.py +201 -0
  46. crawlo/network/__init__.py +21 -7
  47. crawlo/network/request.py +311 -203
  48. crawlo/network/response.py +269 -166
  49. crawlo/pipelines/__init__.py +13 -13
  50. crawlo/pipelines/console_pipeline.py +39 -39
  51. crawlo/pipelines/csv_pipeline.py +317 -0
  52. crawlo/pipelines/json_pipeline.py +219 -0
  53. crawlo/pipelines/mongo_pipeline.py +116 -116
  54. crawlo/pipelines/mysql_pipeline.py +195 -195
  55. crawlo/pipelines/pipeline_manager.py +56 -56
  56. crawlo/project.py +153 -0
  57. crawlo/queue/pqueue.py +37 -0
  58. crawlo/queue/queue_manager.py +304 -0
  59. crawlo/queue/redis_priority_queue.py +192 -0
  60. crawlo/settings/__init__.py +7 -7
  61. crawlo/settings/default_settings.py +226 -169
  62. crawlo/settings/setting_manager.py +99 -99
  63. crawlo/spider/__init__.py +639 -129
  64. crawlo/stats_collector.py +59 -59
  65. crawlo/subscriber.py +106 -106
  66. crawlo/task_manager.py +30 -27
  67. crawlo/templates/crawlo.cfg.tmpl +10 -10
  68. crawlo/templates/project/__init__.py.tmpl +3 -3
  69. crawlo/templates/project/items.py.tmpl +17 -17
  70. crawlo/templates/project/middlewares.py.tmpl +87 -76
  71. crawlo/templates/project/pipelines.py.tmpl +336 -64
  72. crawlo/templates/project/run.py.tmpl +239 -0
  73. crawlo/templates/project/settings.py.tmpl +248 -54
  74. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  75. crawlo/templates/spider/spider.py.tmpl +178 -32
  76. crawlo/utils/__init__.py +7 -7
  77. crawlo/utils/controlled_spider_mixin.py +336 -0
  78. crawlo/utils/date_tools.py +233 -233
  79. crawlo/utils/db_helper.py +343 -343
  80. crawlo/utils/func_tools.py +82 -82
  81. crawlo/utils/large_scale_config.py +287 -0
  82. crawlo/utils/large_scale_helper.py +344 -0
  83. crawlo/utils/log.py +128 -128
  84. crawlo/utils/queue_helper.py +176 -0
  85. crawlo/utils/request.py +267 -267
  86. crawlo/utils/request_serializer.py +220 -0
  87. crawlo/utils/spider_loader.py +62 -62
  88. crawlo/utils/system.py +11 -11
  89. crawlo/utils/tools.py +4 -4
  90. crawlo/utils/url.py +39 -39
  91. crawlo-1.1.2.dist-info/METADATA +567 -0
  92. crawlo-1.1.2.dist-info/RECORD +108 -0
  93. examples/__init__.py +7 -0
  94. tests/__init__.py +7 -7
  95. tests/test_final_validation.py +154 -0
  96. tests/test_proxy_health_check.py +32 -32
  97. tests/test_proxy_middleware_integration.py +136 -136
  98. tests/test_proxy_providers.py +56 -56
  99. tests/test_proxy_stats.py +19 -19
  100. tests/test_proxy_strategies.py +59 -59
  101. tests/test_redis_config.py +29 -0
  102. tests/test_redis_queue.py +225 -0
  103. tests/test_request_serialization.py +71 -0
  104. tests/test_scheduler.py +242 -0
  105. crawlo/pipelines/mysql_batch_pipline.py +0 -273
  106. crawlo/utils/concurrency_manager.py +0 -125
  107. crawlo/utils/pqueue.py +0 -174
  108. crawlo/utils/project.py +0 -197
  109. crawlo-1.1.0.dist-info/METADATA +0 -49
  110. crawlo-1.1.0.dist-info/RECORD +0 -97
  111. examples/gxb/items.py +0 -36
  112. examples/gxb/run.py +0 -16
  113. examples/gxb/settings.py +0 -72
  114. examples/gxb/spider/__init__.py +0 -2
  115. examples/gxb/spider/miit_spider.py +0 -180
  116. examples/gxb/spider/telecom_device.py +0 -129
  117. {examples/gxb → crawlo/queue}/__init__.py +0 -0
  118. {crawlo-1.1.0.dist-info → crawlo-1.1.2.dist-info}/WHEEL +0 -0
  119. {crawlo-1.1.0.dist-info → crawlo-1.1.2.dist-info}/entry_points.txt +0 -0
  120. {crawlo-1.1.0.dist-info → crawlo-1.1.2.dist-info}/top_level.txt +0 -0
crawlo/commands/genspider.py CHANGED
@@ -1,111 +1,152 @@
-import os
-import sys
-from pathlib import Path
-import configparser
-import importlib
-
-TEMPLATES_DIR = Path(__file__).parent.parent / 'templates'
-
-
-def _render_template(tmpl_path, context):
-    """Read the template file and replace {{key}} with the values from context."""
-    with open(tmpl_path, 'r', encoding='utf-8') as f:
-        content = f.read()
-    for key, value in context.items():
-        content = content.replace(f'{{{{{key}}}}}', str(value))
-    return content
-
-
-def main(args):
-    if len(args) < 2:
-        print("Usage: crawlo genspider <spider_name> <domain>")
-        return 1
-
-    spider_name = args[0]
-    domain = args[1]
-
-    # Locate the project root directory
-    project_root = None
-    current = Path.cwd()
-    while True:
-        cfg_file = current / 'crawlo.cfg'
-        if cfg_file.exists():
-            project_root = current
-            break
-        parent = current.parent
-        if parent == current:
-            break
-        current = parent
-
-    if not project_root:
-        print("Error: Not a crawlo project. crawlo.cfg not found.")
-        return 1
-
-    # Add the project root to sys.path
-    if str(project_root) not in sys.path:
-        sys.path.insert(0, str(project_root))
-
-    # Read the settings module from crawlo.cfg to get the project package name
-    config = configparser.ConfigParser()
-    try:
-        config.read(cfg_file, encoding='utf-8')
-        settings_module = config.get('settings', 'default')
-        project_package = settings_module.split('.')[0]  # e.g., myproject.settings -> myproject
-    except Exception as e:
-        print(f"Error reading crawlo.cfg: {e}")
-        return 1
-
-    # Determine the path of the items module
-    items_module_path = f"{project_package}.items"
-
-    # Try to import the items module
-    try:
-        items_module = importlib.import_module(items_module_path)
-        # Collect all classes in the module whose names start with an uppercase letter
-        item_classes = [cls for cls in items_module.__dict__.values()
-                        if isinstance(cls, type) and cls.__name__.isupper()]
-
-        # If any classes were found, use the first one as the default
-        if item_classes:
-            default_item_class = item_classes[0].__name__
-        else:
-            default_item_class = "ExampleItem"  # fall back to the example
-    except ImportError as e:
-        print(f"Error importing items module '{items_module_path}': {e}")
-        default_item_class = "ExampleItem"
-
-    # Create the spider file
-    spiders_dir = project_root / project_package / 'spiders'
-    if not spiders_dir.exists():
-        spiders_dir.mkdir(parents=True)
-
-    spider_file = spiders_dir / f'{spider_name}.py'
-    if spider_file.exists():
-        print(f"Error: Spider '{spider_name}' already exists.")
-        return 1
-
-    # Corrected template path
-    tmpl_path = TEMPLATES_DIR / 'spider' / 'spider.py.tmpl'
-
-    if not tmpl_path.exists():
-        print(f"Error: Template file not found at {tmpl_path}")
-        return 1
-
-    # Generate the proper class name
-    class_name = f"{spider_name.capitalize()}Spider"
-
-    context = {
-        'spider_name': spider_name,
-        'domain': domain,
-        'project_name': project_package,
-        'item_class': default_item_class,
-        'class_name': class_name  # ✅ add the processed class name
-    }
-
-    content = _render_template(tmpl_path, context)
-
-    with open(spider_file, 'w', encoding='utf-8') as f:
-        f.write(content)
-
-    print(f"Spider '{spider_name}' created in {spider_file}")
-    return 0
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+"""
+# @Time : 2025-08-31 22:36
+# @Author : crawl-coder
+# @Desc : Command-line entry point: `crawlo genspider baidu` creates a spider.
+"""
+import sys
+from pathlib import Path
+import configparser
+import importlib
+from rich.console import Console
+
+from .utils import (
+    get_project_root,
+    validate_project_environment,
+    show_error_panel,
+    show_success_panel,
+    validate_spider_name,
+    is_valid_domain
+)
+
+# Initialize the rich console
+console = Console()
+
+TEMPLATES_DIR = Path(__file__).parent.parent / 'templates'
+
+
+def _render_template(tmpl_path, context):
+    """Read the template file and replace {{key}} with the values from context."""
+    with open(tmpl_path, 'r', encoding='utf-8') as f:
+        content = f.read()
+    for key, value in context.items():
+        content = content.replace(f'{{{{{key}}}}}', str(value))
+    return content
+
+
+def main(args):
+    if len(args) < 2:
+        console.print("[bold red]Error:[/bold red] Usage: [blue]crawlo genspider[/blue] <spider_name> <domain>")
+        console.print("💡 Examples:")
+        console.print("   [blue]crawlo genspider[/blue] news_spider news.example.com")
+        console.print("   [blue]crawlo genspider[/blue] product_spider shop.example.com")
+        return 1
+
+    spider_name = args[0]
+    domain = args[1]
+
+    # Validate the spider name
+    if not validate_spider_name(spider_name):
+        show_error_panel(
+            "Invalid Spider Name",
+            f"Spider name '[cyan]{spider_name}[/cyan]' is invalid.\n"
+            "💡 Spider name should:\n"
+            "   • Start with a lowercase letter\n"
+            "   • Contain only lowercase letters, numbers, and underscores\n"
+            "   • Be a valid Python identifier"
+        )
+        return 1
+
+    # Validate the domain format
+    if not is_valid_domain(domain):
+        show_error_panel(
+            "Invalid Domain",
+            f"Domain '[cyan]{domain}[/cyan]' format is invalid.\n"
+            "💡 Please provide a valid domain name like 'example.com'"
+        )
+        return 1
+
+    # Validate the project environment
+    is_valid, project_package, error_msg = validate_project_environment()
+    if not is_valid:
+        show_error_panel("Not a Crawlo Project", error_msg)
+        return 1
+
+    project_root = get_project_root()
+
+    # Determine the path of the items module
+    items_module_path = f"{project_package}.items"
+
+    # Try to import the items module
+    default_item_class = "ExampleItem"  # default fallback
+    try:
+        items_module = importlib.import_module(items_module_path)
+        # Collect all classes in the module whose names start with an uppercase letter
+        item_classes = [
+            cls for cls in items_module.__dict__.values()
+            if isinstance(cls, type) and cls.__name__[0].isupper()  # first letter uppercase
+        ]
+
+        if item_classes:
+            default_item_class = item_classes[0].__name__
+        else:
+            console.print("[yellow]:warning: Warning:[/yellow] No item class found in [cyan]items.py[/cyan], using [green]ExampleItem[/green].")
+
+    except ImportError as e:
+        console.print(f"[yellow]:warning: Warning:[/yellow] Failed to import [cyan]{items_module_path}[/cyan]: {e}")
+        # Keep the default ExampleItem and continue instead of aborting
+
+    # Create the spider file
+    spiders_dir = project_root / project_package / 'spiders'
+    spiders_dir.mkdir(parents=True, exist_ok=True)
+
+    spider_file = spiders_dir / f'{spider_name}.py'
+    if spider_file.exists():
+        show_error_panel(
+            "Spider Already Exists",
+            f"Spider '[cyan]{spider_name}[/cyan]' already exists at\n[green]{spider_file}[/green]"
+        )
+        return 1
+
+    # Template path
+    tmpl_path = TEMPLATES_DIR / 'spider' / 'spider.py.tmpl'
+    if not tmpl_path.exists():
+        show_error_panel(
+            "Template Not Found",
+            f"Template file not found at [cyan]{tmpl_path}[/cyan]"
+        )
+        return 1
+
+    # Generate the class name
+    class_name = f"{spider_name.replace('_', '').capitalize()}Spider"
+
+    context = {
+        'spider_name': spider_name,
+        'domain': domain,
+        'project_name': project_package,
+        'item_class': default_item_class,
+        'class_name': class_name
+    }
+
+    try:
+        content = _render_template(tmpl_path, context)
+        with open(spider_file, 'w', encoding='utf-8') as f:
+            f.write(content)
+
+        console.print(f":white_check_mark: [green]Spider '[bold]{spider_name}[/bold]' created successfully![/green]")
+        console.print(f"   → Location: [cyan]{spider_file}[/cyan]")
+        console.print(f"   → Class: [yellow]{class_name}[/yellow]")
+        console.print(f"   → Domain: [blue]{domain}[/blue]")
+        console.print("\n[bold]Next steps:[/bold]")
+        console.print(f"   [blue]crawlo run[/blue] {spider_name}")
+        console.print(f"   [blue]crawlo check[/blue] {spider_name}")
+
+        return 0
+
+    except Exception as e:
+        show_error_panel(
+            "Creation Failed",
+            f"Failed to create spider: {e}"
+        )
+        return 1
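
Both versions of genspider.py render templates through the same _render_template helper, whose replacement target f'{{{{{key}}}}}' is easy to misread: in an f-string each doubled brace emits one literal brace, so the expression evaluates to the literal text {{key}}. A minimal standalone sketch of that substitution follows; the template string here is a hypothetical stand-in, not the shipped spider.py.tmpl.

def render(template: str, context: dict) -> str:
    for key, value in context.items():
        # f'{{{{{key}}}}}' evaluates to the literal text '{{' + key + '}}'
        template = template.replace(f'{{{{{key}}}}}', str(value))
    return template

tmpl = "class {{class_name}}(Spider):\n    name = '{{spider_name}}'"
print(render(tmpl, {'class_name': 'BaiduSpider', 'spider_name': 'baidu'}))
# class BaiduSpider(Spider):
#     name = 'baidu'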
crawlo/commands/list.py CHANGED
@@ -1,119 +1,156 @@
-#!/usr/bin/python
-# -*- coding: UTF-8 -*-
-"""
-# @Time : 2025-08-31 22:33
-# @Author : crawl-coder
-# @Desc : Command-line entry point: crawlo list, lists all registered spiders.
-"""
-
-import sys
-import configparser
-from pathlib import Path
-from importlib import import_module
-
-from crawlo.crawler import CrawlerProcess
-from crawlo.utils.log import get_logger
-
-
-logger = get_logger(__name__)
-
-
-def get_project_root():
-    """
-    Auto-detect the project root: walk upward from the current directory
-    looking for crawlo.cfg. Return that directory path (as a string) when
-    found, searching at most 10 levels up.
-    """
-    current = Path.cwd()
-
-    for _ in range(10):
-        cfg = current / "crawlo.cfg"
-        if cfg.exists():
-            return str(current)
-
-        # Reached the filesystem root
-        if current == current.parent:
-            break
-        current = current.parent
-
-    return None  # not found
-
-
-def main(args):
-    """
-    Main function: list all available spiders.
-    Usage: crawlo list
-    """
-    if args:
-        print("❌ Usage: crawlo list")
-        return 1
-
-    try:
-        # 1. Locate the project root directory
-        project_root = get_project_root()
-        if not project_root:
-            print("❌ Error: Cannot find 'crawlo.cfg'. Are you in a crawlo project?")
-            print("💡 Tip: Run this command inside your project directory, or create a project with 'crawlo startproject'.")
-            return 1
-
-        project_root_path = Path(project_root)
-        project_root_str = str(project_root_path)
-
-        # 2. Add the project root to the Python path so project modules can be imported
-        if project_root_str not in sys.path:
-            sys.path.insert(0, project_root_str)
-
-        # 3. Read crawlo.cfg to get the settings module
-        cfg_file = project_root_path / "crawlo.cfg"
-        config = configparser.ConfigParser()
-        config.read(cfg_file, encoding="utf-8")
-
-        if not config.has_section("settings") or not config.has_option("settings", "default"):
-            print("❌ Error: Invalid crawlo.cfg: missing [settings] or 'default' option.")
-            return 1
-
-        settings_module = config.get("settings", "default")
-        project_package = settings_module.split(".")[0]
-
-        # 4. Make sure the project package is importable (optional: import it to surface errors)
-        try:
-            import_module(project_package)
-        except ImportError as e:
-            print(f"❌ Failed to import project package '{project_package}': {e}")
-            return 1
-
-        # 5. Initialize the CrawlerProcess and load spider modules
-        spider_modules = [f"{project_package}.spiders"]
-        process = CrawlerProcess(spider_modules=spider_modules)
-
-        # 6. Get all spider names
-        spider_names = process.get_spider_names()
-        if not spider_names:
-            print("📭 No spiders found in 'spiders/' directory.")
-            print("💡 Make sure:")
-            print("   • Spider classes inherit from `crawlo.spider.Spider`")
-            print("   • Each spider has a `name` attribute")
-            print("   • Spiders are imported in `spiders/__init__.py` (if using package)")
-            return 1
-
-        # 7. Print the spider list
-        print(f"📋 Found {len(spider_names)} spider(s):")
-        print("-" * 60)
-        for name in sorted(spider_names):
-            spider_cls = process.get_spider_class(name)
-            module_name = spider_cls.__module__.replace(f"{project_package}.", "")
-            print(f"🕷️ {name:<20} {spider_cls.__name__:<25} ({module_name})")
-        print("-" * 60)
-        return 0
-
-    except Exception as e:
-        print(f"❌ Unexpected error: {e}")
-        logger.exception("Exception during 'crawlo list'")
-        return 1
-
-
-if __name__ == "__main__":
-    """
-    Supports running directly:
-    python -m crawlo.commands.list
-    """
-    sys.exit(main(sys.argv[1:]))
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+"""
+# @Time : 2025-08-31 22:33
+# @Author : crawl-coder
+# @Desc : Command-line entry point: crawlo list, lists all registered spiders.
+"""
+import sys
+from pathlib import Path
+from importlib import import_module
+
+from rich.console import Console
+from rich.table import Table
+from rich.panel import Panel
+from rich.text import Text
+from rich import box
+
+from crawlo.crawler import CrawlerProcess
+from crawlo.utils.log import get_logger
+from .utils import validate_project_environment, show_error_panel
+
+logger = get_logger(__name__)
+console = Console()
+
+
+def main(args):
+    """
+    Main function: list all available spiders.
+    Usage: crawlo list [--json]
+    """
+    show_json = "--json" in args
+
+    # After filtering out flags, check for extra positional arguments
+    filtered_args = [arg for arg in args if not arg.startswith('--')]
+    if filtered_args:
+        if show_json:
+            console.print_json(data={"success": False, "error": "Usage: crawlo list [--json]"})
+        else:
+            console.print("[bold red]❌ Error:[/bold red] Usage: [blue]crawlo list[/blue] [--json]")
+        return 1
+
+    try:
+        # Validate the project environment
+        is_valid, project_package, error_msg = validate_project_environment()
+        if not is_valid:
+            if show_json:
+                console.print_json(data={"success": False, "error": error_msg})
+            else:
+                show_error_panel("Not a Crawlo Project", error_msg)
+            return 1
+
+        # Initialize the CrawlerProcess and load spider modules
+        spider_modules = [f"{project_package}.spiders"]
+        process = CrawlerProcess(spider_modules=spider_modules)
+
+        # Get all spider names
+        spider_names = process.get_spider_names()
+        if not spider_names:
+            if show_json:
+                console.print_json(data={
+                    "success": True,
+                    "spiders": [],
+                    "message": "No spiders found in project"
+                })
+            else:
+                console.print(Panel(
+                    Text.from_markup(
+                        ":envelope_with_arrow: [bold]No spiders found[/bold] in '[cyan]spiders/[/cyan]' directory.\n\n"
+                        "[bold]💡 Make sure:[/bold]\n"
+                        "   • Spider classes inherit from [blue]`crawlo.spider.Spider`[/blue]\n"
+                        "   • Each spider has a [green]`name`[/green] attribute\n"
+                        "   • Spiders are imported in [cyan]`spiders/__init__.py`[/cyan] (if using package)"
+                    ),
+                    title="📭 No Spiders Found",
+                    border_style="yellow",
+                    padding=(1, 2)
+                ))
+            return 0
+
+        # Gather spider information
+        spider_info = []
+        for name in sorted(spider_names):
+            spider_cls = process.get_spider_class(name)
+            module_name = spider_cls.__module__.replace(f"{project_package}.", "")
+
+            # Collect additional details
+            start_urls_count = len(getattr(spider_cls, 'start_urls', []))
+            allowed_domains = getattr(spider_cls, 'allowed_domains', [])
+            custom_settings = getattr(spider_cls, 'custom_settings', {})
+
+            spider_info.append({
+                "name": name,
+                "class": spider_cls.__name__,
+                "module": module_name,
+                "start_urls_count": start_urls_count,
+                "allowed_domains": allowed_domains,
+                "has_custom_settings": bool(custom_settings)
+            })
+
+        # JSON output
+        if show_json:
+            console.print_json(data={
+                "success": True,
+                "count": len(spider_info),
+                "spiders": spider_info
+            })
+            return 0
+
+        # Table output
+        table = Table(
+            title=f"📋 Found {len(spider_names)} spider(s)",
+            box=box.ROUNDED,
+            show_header=True,
+            header_style="bold magenta",
+            title_style="bold green"
+        )
+        table.add_column("Name", style="cyan", no_wrap=True)
+        table.add_column("Class", style="green")
+        table.add_column("Module", style="dim")
+        table.add_column("URLs", style="blue", justify="center")
+        table.add_column("Domains", style="yellow")
+        table.add_column("Custom Settings", style="magenta", justify="center")
+
+        for info in spider_info:
+            domains_display = ", ".join(info["allowed_domains"][:2])  # show the first 2 domains
+            if len(info["allowed_domains"]) > 2:
+                domains_display += f" (+{len(info['allowed_domains'])-2})"
+            elif not domains_display:
+                domains_display = "-"
+
+            table.add_row(
+                info["name"],
+                info["class"],
+                info["module"],
+                str(info["start_urls_count"]),
+                domains_display,
+                "✓" if info["has_custom_settings"] else "-"
+            )
+
+        console.print(table)
+
+        # Show usage hints
+        console.print("\n[bold]🚀 Next steps:[/bold]")
+        console.print("   [blue]crawlo run[/blue] <spider_name>    # Run a specific spider")
+        console.print("   [blue]crawlo run[/blue] all              # Run all spiders")
+        console.print("   [blue]crawlo check[/blue] <spider_name>  # Check spider validity")
+
+        return 0
+
+    except Exception as e:
+        if show_json:
+            console.print_json(data={"success": False, "error": str(e)})
+        else:
+            console.print(f"[bold red]❌ Unexpected error:[/bold red] {e}")
+        logger.exception("Exception during 'crawlo list'")
+        return 1
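
The rewritten list command adds a --json flag whose payload (the success/count/spiders object built above) is meant for scripting. A sketch of consuming it from another Python script, assuming the crawlo CLI is on PATH, the command is run inside a project, and nothing else writes to stdout (rich drops ANSI styling when output is piped, so the JSON should arrive plain):

import json
import subprocess

# Hypothetical consumer of `crawlo list --json`; the field names follow
# the payload constructed in list.py above.
result = subprocess.run(
    ["crawlo", "list", "--json"],
    capture_output=True, text=True,
)
payload = json.loads(result.stdout)
if payload.get("success"):
    for spider in payload.get("spiders", []):
        print(spider["name"], spider["class"], spider["module"])
else:
    print("crawlo list failed:", payload.get("error"))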