crawlo-1.1.3-py3-none-any.whl → crawlo-1.1.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (118)
  1. crawlo/__init__.py +34 -34
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +40 -40
  4. crawlo/commands/__init__.py +13 -13
  5. crawlo/commands/check.py +594 -594
  6. crawlo/commands/genspider.py +151 -151
  7. crawlo/commands/list.py +155 -155
  8. crawlo/commands/run.py +285 -285
  9. crawlo/commands/startproject.py +196 -196
  10. crawlo/commands/stats.py +188 -188
  11. crawlo/commands/utils.py +186 -186
  12. crawlo/config.py +279 -279
  13. crawlo/core/__init__.py +2 -2
  14. crawlo/core/engine.py +171 -171
  15. crawlo/core/enhanced_engine.py +189 -189
  16. crawlo/core/processor.py +40 -40
  17. crawlo/core/scheduler.py +165 -165
  18. crawlo/crawler.py +1027 -1027
  19. crawlo/downloader/__init__.py +242 -242
  20. crawlo/downloader/aiohttp_downloader.py +212 -212
  21. crawlo/downloader/cffi_downloader.py +251 -251
  22. crawlo/downloader/httpx_downloader.py +259 -259
  23. crawlo/event.py +11 -11
  24. crawlo/exceptions.py +81 -81
  25. crawlo/extension/__init__.py +38 -31
  26. crawlo/extension/health_check.py +142 -0
  27. crawlo/extension/log_interval.py +58 -49
  28. crawlo/extension/log_stats.py +82 -44
  29. crawlo/extension/logging_extension.py +44 -35
  30. crawlo/extension/memory_monitor.py +89 -0
  31. crawlo/extension/performance_profiler.py +118 -0
  32. crawlo/extension/request_recorder.py +108 -0
  33. crawlo/filters/__init__.py +154 -154
  34. crawlo/filters/aioredis_filter.py +241 -241
  35. crawlo/filters/memory_filter.py +269 -269
  36. crawlo/items/__init__.py +23 -23
  37. crawlo/items/base.py +21 -21
  38. crawlo/items/fields.py +53 -53
  39. crawlo/items/items.py +104 -104
  40. crawlo/middleware/__init__.py +21 -21
  41. crawlo/middleware/default_header.py +32 -32
  42. crawlo/middleware/download_delay.py +28 -28
  43. crawlo/middleware/middleware_manager.py +135 -135
  44. crawlo/middleware/proxy.py +248 -248
  45. crawlo/middleware/request_ignore.py +30 -30
  46. crawlo/middleware/response_code.py +18 -18
  47. crawlo/middleware/response_filter.py +26 -26
  48. crawlo/middleware/retry.py +124 -124
  49. crawlo/mode_manager.py +200 -200
  50. crawlo/network/__init__.py +21 -21
  51. crawlo/network/request.py +311 -311
  52. crawlo/network/response.py +271 -271
  53. crawlo/pipelines/__init__.py +21 -21
  54. crawlo/pipelines/bloom_dedup_pipeline.py +156 -156
  55. crawlo/pipelines/console_pipeline.py +39 -39
  56. crawlo/pipelines/csv_pipeline.py +316 -316
  57. crawlo/pipelines/database_dedup_pipeline.py +224 -224
  58. crawlo/pipelines/json_pipeline.py +218 -218
  59. crawlo/pipelines/memory_dedup_pipeline.py +115 -115
  60. crawlo/pipelines/mongo_pipeline.py +132 -117
  61. crawlo/pipelines/mysql_pipeline.py +317 -195
  62. crawlo/pipelines/pipeline_manager.py +56 -56
  63. crawlo/pipelines/redis_dedup_pipeline.py +162 -162
  64. crawlo/project.py +153 -153
  65. crawlo/queue/pqueue.py +37 -37
  66. crawlo/queue/queue_manager.py +307 -307
  67. crawlo/queue/redis_priority_queue.py +208 -208
  68. crawlo/settings/__init__.py +7 -7
  69. crawlo/settings/default_settings.py +278 -244
  70. crawlo/settings/setting_manager.py +99 -99
  71. crawlo/spider/__init__.py +639 -639
  72. crawlo/stats_collector.py +59 -59
  73. crawlo/subscriber.py +131 -106
  74. crawlo/task_manager.py +30 -30
  75. crawlo/templates/crawlo.cfg.tmpl +10 -10
  76. crawlo/templates/project/__init__.py.tmpl +3 -3
  77. crawlo/templates/project/items.py.tmpl +17 -17
  78. crawlo/templates/project/middlewares.py.tmpl +111 -87
  79. crawlo/templates/project/pipelines.py.tmpl +97 -341
  80. crawlo/templates/project/run.py.tmpl +251 -251
  81. crawlo/templates/project/settings.py.tmpl +279 -250
  82. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  83. crawlo/templates/spider/spider.py.tmpl +142 -178
  84. crawlo/utils/__init__.py +7 -7
  85. crawlo/utils/controlled_spider_mixin.py +439 -439
  86. crawlo/utils/date_tools.py +233 -233
  87. crawlo/utils/db_helper.py +343 -343
  88. crawlo/utils/func_tools.py +82 -82
  89. crawlo/utils/large_scale_config.py +286 -286
  90. crawlo/utils/large_scale_helper.py +343 -343
  91. crawlo/utils/log.py +128 -128
  92. crawlo/utils/queue_helper.py +175 -175
  93. crawlo/utils/request.py +267 -267
  94. crawlo/utils/request_serializer.py +219 -219
  95. crawlo/utils/spider_loader.py +62 -62
  96. crawlo/utils/system.py +11 -11
  97. crawlo/utils/tools.py +4 -4
  98. crawlo/utils/url.py +39 -39
  99. crawlo-1.1.4.dist-info/METADATA +403 -0
  100. crawlo-1.1.4.dist-info/RECORD +117 -0
  101. examples/__init__.py +7 -7
  102. examples/controlled_spider_example.py +205 -205
  103. tests/__init__.py +7 -7
  104. tests/test_final_validation.py +153 -153
  105. tests/test_proxy_health_check.py +32 -32
  106. tests/test_proxy_middleware_integration.py +136 -136
  107. tests/test_proxy_providers.py +56 -56
  108. tests/test_proxy_stats.py +19 -19
  109. tests/test_proxy_strategies.py +59 -59
  110. tests/test_redis_config.py +28 -28
  111. tests/test_redis_queue.py +224 -224
  112. tests/test_request_serialization.py +70 -70
  113. tests/test_scheduler.py +241 -241
  114. crawlo-1.1.3.dist-info/METADATA +0 -635
  115. crawlo-1.1.3.dist-info/RECORD +0 -113
  116. {crawlo-1.1.3.dist-info → crawlo-1.1.4.dist-info}/WHEEL +0 -0
  117. {crawlo-1.1.3.dist-info → crawlo-1.1.4.dist-info}/entry_points.txt +0 -0
  118. {crawlo-1.1.3.dist-info → crawlo-1.1.4.dist-info}/top_level.txt +0 -0
crawlo/commands/genspider.py CHANGED
@@ -1,152 +1,152 @@
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
# @Time   : 2025-08-31 22:36
# @Author : crawl-coder
# @Desc   : CLI entry point: `crawlo genspider baidu` creates a spider.
"""
import sys
from pathlib import Path
import configparser
import importlib
from rich.console import Console

from .utils import (
    get_project_root,
    validate_project_environment,
    show_error_panel,
    show_success_panel,
    validate_spider_name,
    is_valid_domain
)

# Initialize the rich console
console = Console()

TEMPLATES_DIR = Path(__file__).parent.parent / 'templates'


def _render_template(tmpl_path, context):
    """Read the template file and replace each {{key}} placeholder with its value from context."""
    with open(tmpl_path, 'r', encoding='utf-8') as f:
        content = f.read()
    for key, value in context.items():
        content = content.replace(f'{{{{{key}}}}}', str(value))
    return content


def main(args):
    if len(args) < 2:
        console.print("[bold red]Error:[/bold red] Usage: [blue]crawlo genspider[/blue] <spider_name> <domain>")
        console.print("💡 Examples:")
        console.print("   [blue]crawlo genspider[/blue] news_spider news.example.com")
        console.print("   [blue]crawlo genspider[/blue] product_spider shop.example.com")
        return 1

    spider_name = args[0]
    domain = args[1]

    # Validate the spider name
    if not validate_spider_name(spider_name):
        show_error_panel(
            "Invalid Spider Name",
            f"Spider name '[cyan]{spider_name}[/cyan]' is invalid.\n"
            "💡 Spider name should:\n"
            "   • Start with lowercase letter\n"
            "   • Contain only lowercase letters, numbers, and underscores\n"
            "   • Be a valid Python identifier"
        )
        return 1

    # Validate the domain format
    if not is_valid_domain(domain):
        show_error_panel(
            "Invalid Domain",
            f"Domain '[cyan]{domain}[/cyan]' format is invalid.\n"
            "💡 Please provide a valid domain name like 'example.com'"
        )
        return 1

    # Validate the project environment
    is_valid, project_package, error_msg = validate_project_environment()
    if not is_valid:
        show_error_panel("Not a Crawlo Project", error_msg)
        return 1

    project_root = get_project_root()

    # Determine the path of the items module
    items_module_path = f"{project_package}.items"

    # Try to import the items module
    default_item_class = "ExampleItem"  # default fallback
    try:
        items_module = importlib.import_module(items_module_path)
        # Collect all classes in the module whose names start with an uppercase letter
        item_classes = [
            cls for cls in items_module.__dict__.values()
            if isinstance(cls, type) and cls.__name__[0].isupper()  # first letter uppercase
        ]

        if item_classes:
            default_item_class = item_classes[0].__name__
        else:
            console.print("[yellow]:warning: Warning:[/yellow] No item class found in [cyan]items.py[/cyan], using [green]ExampleItem[/green].")

    except ImportError as e:
        console.print(f"[yellow]:warning: Warning:[/yellow] Failed to import [cyan]{items_module_path}[/cyan]: {e}")
        # Keep the default ExampleItem and continue without aborting

    # Create the spider file
    spiders_dir = project_root / project_package / 'spiders'
    spiders_dir.mkdir(parents=True, exist_ok=True)

    spider_file = spiders_dir / f'{spider_name}.py'
    if spider_file.exists():
        show_error_panel(
            "Spider Already Exists",
            f"Spider '[cyan]{spider_name}[/cyan]' already exists at\n[green]{spider_file}[/green]"
        )
        return 1

    # Template path
    tmpl_path = TEMPLATES_DIR / 'spider' / 'spider.py.tmpl'
    if not tmpl_path.exists():
        show_error_panel(
            "Template Not Found",
            f"Template file not found at [cyan]{tmpl_path}[/cyan]"
        )
        return 1

    # Generate the class name
    class_name = f"{spider_name.replace('_', '').capitalize()}Spider"

    context = {
        'spider_name': spider_name,
        'domain': domain,
        'project_name': project_package,
        'item_class': default_item_class,
        'class_name': class_name
    }

    try:
        content = _render_template(tmpl_path, context)
        with open(spider_file, 'w', encoding='utf-8') as f:
            f.write(content)

        console.print(f":white_check_mark: [green]Spider '[bold]{spider_name}[/bold]' created successfully![/green]")
        console.print(f"   → Location: [cyan]{spider_file}[/cyan]")
        console.print(f"   → Class: [yellow]{class_name}[/yellow]")
        console.print(f"   → Domain: [blue]{domain}[/blue]")
        console.print("\n[bold]Next steps:[/bold]")
        console.print(f"   [blue]crawlo run[/blue] {spider_name}")
        console.print(f"   [blue]crawlo check[/blue] {spider_name}")

        return 0

    except Exception as e:
        show_error_panel(
            "Creation Failed",
            f"Failed to create spider: {e}"
        )
        return 1
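
Two details of the generator above are worth calling out. First, _render_template does plain string substitution on {{key}} placeholders rather than using a template engine; the expression f'{{{{{key}}}}}' evaluates to the literal placeholder '{{key}}'. Second, str.capitalize() lowercases everything after the first character, so for news_spider the generated class is NewsspiderSpider, not NewsSpiderSpider. A minimal self-contained sketch of the same mechanism (the inline TEMPLATE is a hypothetical stand-in for spider.py.tmpl, not the real template):

# Sketch of the {{key}} substitution performed by _render_template.
# TEMPLATE below is a hypothetical stand-in for spider.py.tmpl.
TEMPLATE = (
    "from {{project_name}}.items import {{item_class}}\n\n"
    "class {{class_name}}(Spider):\n"
    "    name = '{{spider_name}}'\n"
    "    allowed_domains = ['{{domain}}']\n"
)

def render(template: str, context: dict) -> str:
    for key, value in context.items():
        # f'{{{{{key}}}}}' renders as the literal '{{key}}'
        template = template.replace(f'{{{{{key}}}}}', str(value))
    return template

spider_name = "news_spider"
context = {
    "spider_name": spider_name,
    "domain": "news.example.com",
    "project_name": "myproject",
    "item_class": "NewsItem",
    # str.capitalize() lowercases the tail: 'newsspider' -> 'Newsspider'
    "class_name": f"{spider_name.replace('_', '').capitalize()}Spider",
}
print(render(TEMPLATE, context))  # emits: class NewsspiderSpider(Spider): ...

Because the substitution is a plain str.replace pass, any {{key}} absent from the context is left verbatim in the generated file.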
crawlo/commands/list.py CHANGED
@@ -1,156 +1,156 @@
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
# @Time   : 2025-08-31 22:33
# @Author : crawl-coder
# @Desc   : CLI entry point: crawlo list, lists all registered spiders.
"""
import sys
from pathlib import Path
from importlib import import_module

from rich.console import Console
from rich.table import Table
from rich.panel import Panel
from rich.text import Text
from rich import box

from crawlo.crawler import CrawlerProcess
from crawlo.utils.log import get_logger
from .utils import validate_project_environment, show_error_panel

logger = get_logger(__name__)
console = Console()


def main(args):
    """
    Main entry point: list all available spiders.
    Usage: crawlo list [--json]
    """
    show_json = "--json" in args

    # After filtering out flags, check for unexpected extra arguments
    filtered_args = [arg for arg in args if not arg.startswith('--')]
    if filtered_args:
        if show_json:
            console.print_json(data={"success": False, "error": "Usage: crawlo list [--json]"})
        else:
            console.print("[bold red]❌ Error:[/bold red] Usage: [blue]crawlo list[/blue] [--json]")
        return 1

    try:
        # Validate the project environment
        is_valid, project_package, error_msg = validate_project_environment()
        if not is_valid:
            if show_json:
                console.print_json(data={"success": False, "error": error_msg})
            else:
                show_error_panel("Not a Crawlo Project", error_msg)
            return 1

        # Initialize CrawlerProcess and load the spider modules
        spider_modules = [f"{project_package}.spiders"]
        process = CrawlerProcess(spider_modules=spider_modules)

        # Fetch all spider names
        spider_names = process.get_spider_names()
        if not spider_names:
            if show_json:
                console.print_json(data={
                    "success": True,
                    "spiders": [],
                    "message": "No spiders found in project"
                })
            else:
                console.print(Panel(
                    Text.from_markup(
                        ":envelope_with_arrow: [bold]No spiders found[/bold] in '[cyan]spiders/[/cyan]' directory.\n\n"
                        "[bold]💡 Make sure:[/bold]\n"
                        "   • Spider classes inherit from [blue]`crawlo.spider.Spider`[/blue]\n"
                        "   • Each spider has a [green]`name`[/green] attribute\n"
                        "   • Spiders are imported in [cyan]`spiders/__init__.py`[/cyan] (if using package)"
                    ),
                    title="📭 No Spiders Found",
                    border_style="yellow",
                    padding=(1, 2)
                ))
            return 0

        # Assemble spider metadata
        spider_info = []
        for name in sorted(spider_names):
            spider_cls = process.get_spider_class(name)
            module_name = spider_cls.__module__.replace(f"{project_package}.", "")

            # Gather extra details
            start_urls_count = len(getattr(spider_cls, 'start_urls', []))
            allowed_domains = getattr(spider_cls, 'allowed_domains', [])
            custom_settings = getattr(spider_cls, 'custom_settings', {})

            spider_info.append({
                "name": name,
                "class": spider_cls.__name__,
                "module": module_name,
                "start_urls_count": start_urls_count,
                "allowed_domains": allowed_domains,
                "has_custom_settings": bool(custom_settings)
            })

        # JSON output
        if show_json:
            console.print_json(data={
                "success": True,
                "count": len(spider_info),
                "spiders": spider_info
            })
            return 0

        # Table output
        table = Table(
            title=f"📋 Found {len(spider_names)} spider(s)",
            box=box.ROUNDED,
            show_header=True,
            header_style="bold magenta",
            title_style="bold green"
        )
        table.add_column("Name", style="cyan", no_wrap=True)
        table.add_column("Class", style="green")
        table.add_column("Module", style="dim")
        table.add_column("URLs", style="blue", justify="center")
        table.add_column("Domains", style="yellow")
        table.add_column("Custom Settings", style="magenta", justify="center")

        for info in spider_info:
            domains_display = ", ".join(info["allowed_domains"][:2])  # show the first 2 domains
            if len(info["allowed_domains"]) > 2:
                domains_display += f" (+{len(info['allowed_domains'])-2})"
            elif not domains_display:
                domains_display = "-"

            table.add_row(
                info["name"],
                info["class"],
                info["module"],
                str(info["start_urls_count"]),
                domains_display,
                "✓" if info["has_custom_settings"] else "-"
            )

        console.print(table)

        # Show usage hints
        console.print("\n[bold]🚀 Next steps:[/bold]")
        console.print("   [blue]crawlo run[/blue] <spider_name>     # Run a specific spider")
        console.print("   [blue]crawlo run[/blue] all               # Run all spiders")
        console.print("   [blue]crawlo check[/blue] <spider_name>   # Check spider validity")

        return 0

    except Exception as e:
        if show_json:
            console.print_json(data={"success": False, "error": str(e)})
        else:
            console.print(f"[bold red]❌ Unexpected error:[/bold red] {e}")
        logger.exception("Exception during 'crawlo list'")
        return 1
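
Since print_json writes to standard output, the --json mode above is straightforward to consume from scripts. A hedged sketch, assuming the crawlo CLI is on PATH and invoked from inside a project directory; the field names mirror the spider_info dicts built in list.py:

# Sketch: consume `crawlo list --json` from a script.
# Assumes the crawlo CLI is installed and run from a project directory;
# the keys below mirror the spider_info dicts assembled in list.py.
import json
import subprocess

result = subprocess.run(
    ["crawlo", "list", "--json"],
    capture_output=True, text=True, check=False
)
data = json.loads(result.stdout)

if data.get("success"):
    for spider in data.get("spiders", []):
        print(f"{spider['name']}: {spider['class']} "
              f"({spider['start_urls_count']} start URL(s))")
else:
    print("crawlo list failed:", data.get("error"))

On any failure the command exits with status 1 and emits {"success": false, "error": ...}, so branching on the success flag is sufficient.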