crawlo-1.3.2-py3-none-any.whl → crawlo-1.3.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (105)
  1. crawlo/__init__.py +24 -0
  2. crawlo/__version__.py +1 -1
  3. crawlo/commands/run.py +58 -32
  4. crawlo/core/__init__.py +44 -0
  5. crawlo/core/engine.py +119 -45
  6. crawlo/core/scheduler.py +4 -3
  7. crawlo/crawler.py +603 -1133
  8. crawlo/downloader/aiohttp_downloader.py +4 -2
  9. crawlo/extension/__init__.py +1 -1
  10. crawlo/extension/logging_extension.py +23 -7
  11. crawlo/factories/__init__.py +28 -0
  12. crawlo/factories/base.py +69 -0
  13. crawlo/factories/crawler.py +104 -0
  14. crawlo/factories/registry.py +85 -0
  15. crawlo/filters/aioredis_filter.py +25 -2
  16. crawlo/framework.py +292 -0
  17. crawlo/initialization/__init__.py +40 -0
  18. crawlo/initialization/built_in.py +426 -0
  19. crawlo/initialization/context.py +142 -0
  20. crawlo/initialization/core.py +194 -0
  21. crawlo/initialization/phases.py +149 -0
  22. crawlo/initialization/registry.py +146 -0
  23. crawlo/items/base.py +2 -1
  24. crawlo/logging/__init__.py +38 -0
  25. crawlo/logging/config.py +97 -0
  26. crawlo/logging/factory.py +129 -0
  27. crawlo/logging/manager.py +112 -0
  28. crawlo/middleware/middleware_manager.py +1 -1
  29. crawlo/middleware/offsite.py +1 -1
  30. crawlo/mode_manager.py +26 -1
  31. crawlo/pipelines/pipeline_manager.py +2 -1
  32. crawlo/project.py +76 -46
  33. crawlo/queue/pqueue.py +11 -5
  34. crawlo/queue/queue_manager.py +143 -19
  35. crawlo/queue/redis_priority_queue.py +69 -49
  36. crawlo/settings/default_settings.py +110 -14
  37. crawlo/settings/setting_manager.py +29 -13
  38. crawlo/spider/__init__.py +34 -16
  39. crawlo/stats_collector.py +17 -3
  40. crawlo/task_manager.py +112 -3
  41. crawlo/templates/project/settings.py.tmpl +103 -202
  42. crawlo/templates/project/settings_distributed.py.tmpl +122 -135
  43. crawlo/templates/project/settings_gentle.py.tmpl +149 -43
  44. crawlo/templates/project/settings_high_performance.py.tmpl +127 -90
  45. crawlo/templates/project/settings_minimal.py.tmpl +46 -15
  46. crawlo/templates/project/settings_simple.py.tmpl +138 -75
  47. crawlo/templates/project/spiders/__init__.py.tmpl +5 -1
  48. crawlo/templates/run.py.tmpl +10 -14
  49. crawlo/templates/spiders_init.py.tmpl +10 -0
  50. crawlo/tools/network_diagnostic.py +365 -0
  51. crawlo/utils/class_loader.py +26 -0
  52. crawlo/utils/error_handler.py +76 -35
  53. crawlo/utils/log.py +41 -144
  54. crawlo/utils/redis_connection_pool.py +43 -6
  55. crawlo/utils/request_serializer.py +8 -1
  56. {crawlo-1.3.2.dist-info → crawlo-1.3.4.dist-info}/METADATA +120 -14
  57. {crawlo-1.3.2.dist-info → crawlo-1.3.4.dist-info}/RECORD +104 -45
  58. tests/authenticated_proxy_example.py +2 -2
  59. tests/baidu_performance_test.py +109 -0
  60. tests/baidu_test.py +60 -0
  61. tests/comprehensive_framework_test.py +213 -0
  62. tests/comprehensive_test.py +82 -0
  63. tests/comprehensive_testing_summary.md +187 -0
  64. tests/debug_configure.py +70 -0
  65. tests/debug_framework_logger.py +85 -0
  66. tests/debug_log_levels.py +64 -0
  67. tests/distributed_test.py +67 -0
  68. tests/distributed_test_debug.py +77 -0
  69. tests/final_command_test_report.md +0 -0
  70. tests/final_comprehensive_test.py +152 -0
  71. tests/final_validation_test.py +183 -0
  72. tests/framework_performance_test.py +203 -0
  73. tests/optimized_performance_test.py +212 -0
  74. tests/performance_comparison.py +246 -0
  75. tests/queue_blocking_test.py +114 -0
  76. tests/queue_test.py +90 -0
  77. tests/scrapy_comparison/ofweek_scrapy.py +139 -0
  78. tests/scrapy_comparison/scrapy_test.py +134 -0
  79. tests/simple_command_test.py +120 -0
  80. tests/simple_crawlo_test.py +128 -0
  81. tests/simple_log_test.py +58 -0
  82. tests/simple_optimization_test.py +129 -0
  83. tests/simple_spider_test.py +50 -0
  84. tests/simple_test.py +48 -0
  85. tests/test_all_commands.py +231 -0
  86. tests/test_batch_processor.py +179 -0
  87. tests/test_component_factory.py +175 -0
  88. tests/test_controlled_spider_mixin.py +80 -0
  89. tests/test_enhanced_error_handler_comprehensive.py +246 -0
  90. tests/test_factories.py +253 -0
  91. tests/test_framework_logger.py +67 -0
  92. tests/test_framework_startup.py +65 -0
  93. tests/test_large_scale_config.py +113 -0
  94. tests/test_large_scale_helper.py +236 -0
  95. tests/test_mode_change.py +73 -0
  96. tests/test_mode_consistency.py +1 -1
  97. tests/test_performance_monitor.py +116 -0
  98. tests/test_queue_empty_check.py +42 -0
  99. tests/untested_features_report.md +139 -0
  100. tests/verify_debug.py +52 -0
  101. tests/verify_log_fix.py +112 -0
  102. tests/DOUBLE_CRAWLO_PREFIX_FIX_REPORT.md +0 -82
  103. {crawlo-1.3.2.dist-info → crawlo-1.3.4.dist-info}/WHEEL +0 -0
  104. {crawlo-1.3.2.dist-info → crawlo-1.3.4.dist-info}/entry_points.txt +0 -0
  105. {crawlo-1.3.2.dist-info → crawlo-1.3.4.dist-info}/top_level.txt +0 -0
tests/queue_blocking_test.py ADDED
@@ -0,0 +1,114 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+ """
+ Queue blocking behavior test script.
+ Verifies the blocking behavior when the queue is full.
+ """
+
+ import sys
+ import os
+ import asyncio
+ import time
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
+
+ from crawlo.network.request import Request
+ from crawlo.queue.queue_manager import QueueConfig, QueueManager
+
+
+ async def test_queue_blocking_behavior():
+     """Test queue blocking behavior."""
+     print("Starting queue blocking behavior test...")
+
+     # Initialize the framework
+     from crawlo.initialization import initialize_framework
+     settings = initialize_framework()
+
+     # Use a small queue configuration for this test
+     settings.set('SCHEDULER_MAX_QUEUE_SIZE', 5)  # very small queue size
+
+     # Build the queue configuration
+     queue_config = QueueConfig.from_settings(settings)
+     queue_config.max_queue_size = 5  # small queue size for the test
+
+     # Create the queue manager
+     queue_manager = QueueManager(queue_config)
+
+     # Initialize the queue
+     await queue_manager.initialize()
+
+     print(f"Queue type: {queue_manager._queue_type}")
+     print(f"Queue max size: {queue_config.max_queue_size}")
+
+     # Producer task
+     async def producer(queue_manager, name, count):
+         """Producer: adds requests to the queue."""
+         print(f"Producer {name} starting, will add {count} requests")
+         start_time = time.time()
+
+         for i in range(count):
+             request = Request(url=f'https://example.com/page{name}_{i}.html')
+             try:
+                 # This should block while the queue is full
+                 await queue_manager.put(request)
+                 print(f"Producer {name} added request {i}")
+             except Exception as e:
+                 print(f"Producer {name} failed to add request {i}: {e}")
+
+         end_time = time.time()
+         print(f"Producer {name} finished in {end_time - start_time:.2f} s")
+
+     # Consumer task
+     async def consumer(queue_manager, name, count):
+         """Consumer: takes requests from the queue."""
+         print(f"Consumer {name} starting, will fetch {count} requests")
+         start_time = time.time()
+
+         retrieved_count = 0
+         while retrieved_count < count:
+             try:
+                 request = await queue_manager.get(timeout=2.0)
+                 if request:
+                     print(f"Consumer {name} got request: {request.url}")
+                     retrieved_count += 1
+                     # Simulate processing time
+                     await asyncio.sleep(0.1)
+                 else:
+                     print(f"Consumer {name} timed out without getting a request")
+                     break
+             except Exception as e:
+                 print(f"Consumer {name} failed to get a request: {e}")
+                 break
+
+         end_time = time.time()
+         print(f"Consumer {name} finished, fetched {retrieved_count} requests in {end_time - start_time:.2f} s")
+
+     # Run producer and consumer concurrently
+     print("Starting concurrency test...")
+
+     # The producer tries to add 10 requests to a queue of size 5;
+     # the consumer drains it gradually, so the producer should block until space frees up
+     tasks = [
+         asyncio.create_task(producer(queue_manager, "P1", 10)),
+         asyncio.create_task(consumer(queue_manager, "C1", 10))
+     ]
+
+     # Wait for all tasks to finish
+     await asyncio.gather(*tasks, return_exceptions=True)
+
+     final_queue_size = await queue_manager.size()
+     print(f"Final queue size: {final_queue_size}")
+
+     # Close the queue
+     await queue_manager.close()
+
+     print("Queue blocking behavior test complete!")
+
+
+ def main():
+     """Entry point."""
+     print("Starting queue blocking behavior test...")
+     asyncio.run(test_queue_blocking_behavior())
+
+
+ if __name__ == "__main__":
+     main()
tests/queue_test.py ADDED
@@ -0,0 +1,90 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+ """
+ Queue system test script.
+ """
+
+ import sys
+ import os
+ import asyncio
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
+
+ from crawlo.network.request import Request
+ from crawlo.queue.queue_manager import QueueConfig, QueueManager
+
+
+ async def test_queue_system():
+     """Test the queue system."""
+     print("Starting queue system test...")
+
+     # Initialize the framework
+     from crawlo.initialization import initialize_framework
+     settings = initialize_framework()
+
+     # Use a small queue configuration for this test
+     settings.set('SCHEDULER_MAX_QUEUE_SIZE', 10)
+
+     # Build the queue configuration
+     queue_config = QueueConfig.from_settings(settings)
+     queue_config.max_queue_size = 10  # small queue size for the test
+
+     # Create the queue manager
+     queue_manager = QueueManager(queue_config)
+
+     # Initialize the queue
+     await queue_manager.initialize()
+
+     print(f"Queue type: {queue_manager._queue_type}")
+     print(f"Queue max size: {queue_config.max_queue_size}")
+
+     # Test adding requests to the queue
+     print("Testing adding requests to the queue...")
+     tasks = []
+     for i in range(15):  # try to add 15 requests, more than the queue size
+         request = Request(url=f'https://example.com/page{i}.html')
+         task = asyncio.create_task(queue_manager.put(request))
+         tasks.append(task)
+
+     # Wait for all tasks to finish
+     results = await asyncio.gather(*tasks, return_exceptions=True)
+
+     success_count = sum(1 for result in results if result is True)
+     print(f"Successfully added {success_count} requests to the queue")
+
+     queue_size = await queue_manager.size()
+     print(f"Current queue size: {queue_size}")
+
+     # Test getting requests from the queue
+     print("Testing getting requests from the queue...")
+     retrieved_count = 0
+     while retrieved_count < 15:
+         try:
+             request = await queue_manager.get(timeout=1.0)
+             if request:
+                 print(f"Got request: {request.url}")
+                 retrieved_count += 1
+             else:
+                 break
+         except Exception as e:
+             print(f"Failed to get request: {e}")
+             break
+
+     print(f"Fetched {retrieved_count} requests in total")
+
+     final_queue_size = await queue_manager.size()
+     print(f"Final queue size: {final_queue_size}")
+
+     # Close the queue
+     await queue_manager.close()
+
+     print("Queue system test complete!")
+
+
+ def main():
+     """Entry point."""
+     print("Starting queue system test...")
+     asyncio.run(test_queue_system())
+
+
+ if __name__ == "__main__":
+     main()
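
The two queue test scripts above exercise the same QueueManager API. As a quick orientation, here is a condensed sketch of that usage pattern, assuming only the calls that appear in these tests (initialize_framework, QueueConfig.from_settings, and the async put/get/size/close methods, with get() returning None on timeout as the tests rely on):

    import asyncio

    from crawlo.initialization import initialize_framework
    from crawlo.network.request import Request
    from crawlo.queue.queue_manager import QueueConfig, QueueManager

    async def demo():
        settings = initialize_framework()            # framework bootstrap, returns the settings object
        settings.set('SCHEDULER_MAX_QUEUE_SIZE', 5)  # keep the queue deliberately small
        config = QueueConfig.from_settings(settings)
        queue = QueueManager(config)
        await queue.initialize()

        await queue.put(Request(url='https://example.com/'))  # may block while the queue is full
        request = await queue.get(timeout=1.0)                # None if nothing arrives in time
        print(request.url if request else 'timed out', await queue.size())

        await queue.close()

    asyncio.run(demo())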
tests/scrapy_comparison/ofweek_scrapy.py ADDED
@@ -0,0 +1,139 @@
+ # -*- coding: utf-8 -*-
+ import scrapy
+ from urllib.parse import urljoin
+
+
+ class NewsItem(scrapy.Item):
+     title = scrapy.Field()
+     publish_time = scrapy.Field()
+     url = scrapy.Field()
+     source = scrapy.Field()
+     content = scrapy.Field()
+
+
+ class OfweekScrapySpider(scrapy.Spider):
+     name = 'ofweek_scrapy'
+     allowed_domains = ['ee.ofweek.com']
+
+     def start_requests(self):
+         headers = {
+             "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
+             "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
+             "Cache-Control": "no-cache",
+             "Connection": "keep-alive",
+             "Pragma": "no-cache",
+             "Referer": "https://ee.ofweek.com/CATList-2800-8100-ee-2.html",
+             "Sec-Fetch-Dest": "document",
+             "Sec-Fetch-Mode": "navigate",
+             "Sec-Fetch-Site": "same-origin",
+             "Sec-Fetch-User": "?1",
+             "Upgrade-Insecure-Requests": "1",
+             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
+             "sec-ch-ua": "\"Not;A=Brand\";v=\"99\", \"Google Chrome\";v=\"139\", \"Chromium\";v=\"139\"",
+             "sec-ch-ua-mobile": "?0",
+             "sec-ch-ua-platform": "\"Windows\""
+         }
+         cookies = {
+             "__utmz": "57425525.1730117117.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none)",
+             "Hm_lvt_abe9900db162c6d089cdbfd107db0f03": "1739244841",
+             "Hm_lvt_af50e2fc51af73da7720fb324b88a975": "1740100727",
+             "JSESSIONID": "FEA96D3B5FC31350B2285E711BF2A541",
+             "Hm_lvt_28a416fcfc17063eb9c4f9bb1a1f5cda": "1757477622",
+             "HMACCOUNT": "08DF0D235A291EAA",
+             "__utma": "57425525.2080994505.1730117117.1747970718.1757477622.50",
+             "__utmc": "57425525",
+             "__utmt": "1",
+             "__utmb": "57425525.2.10.1757477622",
+             "Hm_lpvt_28a416fcfc17063eb9c4f9bb1a1f5cda": "1757477628",
+             "index_burying_point": "c64d6c31e69d560efe319cc9f8be279f"
+         }
+
+         # Use a limited number of pages for the test
+         max_page = 50
+         for page in range(1, max_page + 1):
+             url = f'https://ee.ofweek.com/CATList-2800-8100-ee-{page}.html'
+             yield scrapy.Request(
+                 url=url,
+                 callback=self.parse,
+                 headers=headers,
+                 cookies=cookies
+             )
+
+     def parse(self, response):
+         self.logger.info(f'Parsing page: {response.url}')
+
+         rows = response.xpath(
+             '//div[@class="main_left"]/div[@class="list_model"]/div[@class="model_right model_right2"]')
+         self.logger.info(f"Found {len(rows)} entries on page {response.url}")
+
+         for row in rows:
+             try:
+                 # Extract URL and title
+                 url = row.xpath('./h3/a/@href').extract_first()
+                 title = row.xpath('./h3/a/text()').extract_first()
+
+                 # Fault tolerance
+                 if not url:
+                     self.logger.warning("Entry has no URL, skipping")
+                     continue
+
+                 if not title:
+                     self.logger.warning("Entry has no title, skipping")
+                     continue
+
+                 # Make sure the URL is absolute
+                 absolute_url = urljoin(response.url, url)
+
+                 # Validate the URL format
+                 if not absolute_url.startswith(('http://', 'https://')):
+                     self.logger.warning(f"Invalid URL format, skipping: {absolute_url}")
+                     continue
+
+                 self.logger.info(f"Extracted detail page link: {absolute_url}, title: {title}")
+                 yield scrapy.Request(
+                     url=absolute_url,
+                     meta={
+                         "title": title.strip() if title else '',
+                         "parent_url": response.url
+                     },
+                     callback=self.parse_detail
+                 )
+             except Exception as e:
+                 self.logger.error(f"Error while processing entry: {e}")
+                 continue
+
+     def parse_detail(self, response):
+         self.logger.info(f'Parsing detail page: {response.url}')
+
+         try:
+             title = response.meta.get('title', '')
+
+             # Extract content, with fault tolerance
+             content_elements = response.xpath('//div[@class="TRS_Editor"]|//*[@id="articleC"]')
+             if content_elements:
+                 content = content_elements.xpath('.//text()').extract()
+                 content = '\n'.join([text.strip() for text in content if text.strip()])
+             else:
+                 content = ''
+                 self.logger.warning(f"Content area not found: {response.url}")
+
+             # Extract the publish time
+             publish_time = response.xpath('//div[@class="time fl"]/text()').extract_first()
+             if publish_time:
+                 publish_time = publish_time.strip()
+
+             source = response.xpath('//div[@class="source-name"]/text()').extract_first()
+
+             # Build the item
+             item = NewsItem()
+             item['title'] = title.strip() if title else ''
+             item['publish_time'] = publish_time if publish_time else ''
+             item['url'] = response.url
+             item['source'] = source if source else ''
+             item['content'] = content
+
+             self.logger.info(f"Extracted detail page data: {item['title']}")
+             yield item
+
+         except Exception as e:
+             self.logger.error(f"Error parsing detail page {response.url}: {e}")
tests/scrapy_comparison/scrapy_test.py ADDED
@@ -0,0 +1,134 @@
+ # -*- coding: utf-8 -*-
+ import scrapy
+ from urllib.parse import urljoin
+
+ class NewsItem(scrapy.Item):
+     title = scrapy.Field()
+     publish_time = scrapy.Field()
+     url = scrapy.Field()
+     source = scrapy.Field()
+     content = scrapy.Field()
+
+ class OfweekScrapyTestSpider(scrapy.Spider):
+     name = 'ofweek_scrapy_test'
+     allowed_domains = ['ee.ofweek.com']
+
+     def start_requests(self):
+         headers = {
+             "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
+             "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
+             "Cache-Control": "no-cache",
+             "Connection": "keep-alive",
+             "Pragma": "no-cache",
+             "Referer": "https://ee.ofweek.com/CATList-2800-8100-ee-2.html",
+             "Sec-Fetch-Dest": "document",
+             "Sec-Fetch-Mode": "navigate",
+             "Sec-Fetch-Site": "same-origin",
+             "Sec-Fetch-User": "?1",
+             "Upgrade-Insecure-Requests": "1",
+             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
+         }
+         cookies = {
+             "__utmz": "57425525.1730117117.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none)",
+             "Hm_lvt_abe9900db162c6d089cdbfd107db0f03": "1739244841",
+             "Hm_lvt_af50e2fc51af73da7720fb324b88a975": "1740100727",
+             "JSESSIONID": "FEA96D3B5FC31350B2285E711BF2A541",
+             "Hm_lvt_28a416fcfc17063eb9c4f9bb1a1f5cda": "1757477622",
+             "HMACCOUNT": "08DF0D235A291EAA",
+             "__utma": "57425525.2080994505.1730117117.1747970718.1757477622.50",
+             "__utmc": "57425525",
+             "__utmt": "1",
+             "__utmb": "57425525.2.10.1757477622",
+             "Hm_lpvt_28a416fcfc17063eb9c4f9bb1a1f5cda": "1757477628",
+             "index_burying_point": "c64d6c31e69d560efe319cc9f8be279f"
+         }
+
+         # Use a limited number of pages for the test
+         max_page = 5
+         for page in range(1, max_page + 1):
+             url = f'https://ee.ofweek.com/CATList-2800-8100-ee-{page}.html'
+             yield scrapy.Request(
+                 url=url,
+                 callback=self.parse,
+                 headers=headers,
+                 cookies=cookies
+             )
+
+     def parse(self, response):
+         self.logger.info(f'Parsing page: {response.url}')
+
+         rows = response.xpath(
+             '//div[@class="main_left"]/div[@class="list_model"]/div[@class="model_right model_right2"]')
+         self.logger.info(f"Found {len(rows)} entries on page {response.url}")
+
+         for row in rows:
+             try:
+                 # Extract URL and title
+                 url = row.xpath('./h3/a/@href').extract_first()
+                 title = row.xpath('./h3/a/text()').extract_first()
+
+                 # Fault tolerance
+                 if not url:
+                     self.logger.warning("Entry has no URL, skipping")
+                     continue
+
+                 if not title:
+                     self.logger.warning("Entry has no title, skipping")
+                     continue
+
+                 # Make sure the URL is absolute
+                 absolute_url = urljoin(response.url, url)
+
+                 # Validate the URL format
+                 if not absolute_url.startswith(('http://', 'https://')):
+                     self.logger.warning(f"Invalid URL format, skipping: {absolute_url}")
+                     continue
+
+                 self.logger.info(f"Extracted detail page link: {absolute_url}, title: {title}")
+                 yield scrapy.Request(
+                     url=absolute_url,
+                     meta={
+                         "title": title.strip() if title else '',
+                         "parent_url": response.url
+                     },
+                     callback=self.parse_detail
+                 )
+             except Exception as e:
+                 self.logger.error(f"Error while processing entry: {e}")
+                 continue
+
+     def parse_detail(self, response):
+         self.logger.info(f'Parsing detail page: {response.url}')
+
+         try:
+             title = response.meta.get('title', '')
+
+             # Extract content, with fault tolerance
+             content_elements = response.xpath('//div[@class="TRS_Editor"]|//*[@id="articleC"]')
+             if content_elements:
+                 content = content_elements.xpath('.//text()').extract()
+                 content = '\n'.join([text.strip() for text in content if text.strip()])
+             else:
+                 content = ''
+                 self.logger.warning(f"Content area not found: {response.url}")
+
+             # Extract the publish time
+             publish_time = response.xpath('//div[@class="time fl"]/text()').extract_first()
+             if publish_time:
+                 publish_time = publish_time.strip()
+
+             source = response.xpath('//div[@class="source-name"]/text()').extract_first()
+
+             # Build the item
+             item = NewsItem()
+             item['title'] = title.strip() if title else ''
+             item['publish_time'] = publish_time if publish_time else ''
+             item['url'] = response.url
+             item['source'] = source if source else ''
+             item['content'] = content
+
+             self.logger.info(f"Extracted detail page data: {item['title']}")
+             yield item
+
+         except Exception as e:
+             self.logger.error(f"Error parsing detail page {response.url}: {e}")
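
The two comparison spiders above are plain Scrapy spiders and are not wired into the crawlo CLI. A minimal, hypothetical runner using Scrapy's standard CrawlerProcess API might look like the sketch below; the import path and log level are illustrative, not part of this package:

    from scrapy.crawler import CrawlerProcess

    # Hypothetical import: assumes tests/scrapy_comparison is on the Python path.
    from ofweek_scrapy import OfweekScrapySpider

    process = CrawlerProcess(settings={"LOG_LEVEL": "INFO"})
    process.crawl(OfweekScrapySpider)
    process.start()  # blocks until the crawl finishes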
tests/simple_command_test.py ADDED
@@ -0,0 +1,120 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ """
+ Simple test of all crawlo commands
+ """
+
+ import sys
+ import os
+ import subprocess
+
+ # Add the project root to the Python path
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
+
+ def run_command(cmd, cwd=None):
+     """Run a command and return the result."""
+     try:
+         result = subprocess.run(
+             cmd,
+             shell=True,
+             cwd=cwd,
+             capture_output=True,
+             text=True,
+             timeout=30
+         )
+         return result.returncode, result.stdout, result.stderr
+     except subprocess.TimeoutExpired:
+         return -1, "", "Command timed out"
+     except Exception as e:
+         return -1, "", str(e)
+
+ def test_help_command():
+     """Test the help command."""
+     print("Testing the help command...")
+
+     # Test the -h flag
+     code, stdout, stderr = run_command("python -m crawlo.cli -h")
+     assert code == 0, f"help command failed: {stderr}"
+     assert "Crawlo" in stdout, "help output does not contain the framework name"
+
+     # Test the --help flag
+     code, stdout, stderr = run_command("python -m crawlo.cli --help")
+     assert code == 0, f"help command failed: {stderr}"
+     assert "Crawlo" in stdout, "help output does not contain the framework name"
+
+     print("✅ help command test passed")
+
+ def test_version_command():
+     """Test the version command."""
+     print("Testing the version command...")
+
+     # Test the -v flag
+     code, stdout, stderr = run_command("python -m crawlo.cli -v")
+     assert code == 0, f"version command failed: {stderr}"
+     assert "Crawlo" in stdout, "version output does not contain the framework name"
+
+     # Test the --version flag
+     code, stdout, stderr = run_command("python -m crawlo.cli --version")
+     assert code == 0, f"version command failed: {stderr}"
+     assert "Crawlo" in stdout, "version output does not contain the framework name"
+
+     print("✅ version command test passed")
+
+ def test_command_help():
+     """Test each command's help output."""
+     print("Testing each command's help output...")
+
+     commands = ["startproject", "genspider", "run", "check", "list", "stats"]
+
+     for command in commands:
+         code, stdout, stderr = run_command(f"python -m crawlo.cli {command} --help")
+         # Command help often exits non-zero, so just check that there is output
+         assert len(stdout) > 0 or len(stderr) > 0, f"{command} command help produced no output"
+         print(f"✅ {command} command help test passed")
+
+ def test_invalid_command():
+     """Test an invalid command."""
+     print("Testing an invalid command...")
+
+     code, stdout, stderr = run_command("python -m crawlo.cli invalid_command")
+     assert code != 0, "an invalid command should exit with a non-zero status"
+     assert "Unknown command" in stderr or "Unknown command" in stdout, "an unknown-command message is expected"
+
+     print("✅ invalid command test passed")
+
+ def main():
+     """Entry point."""
+     print("Starting simple test of all crawlo commands...")
+     print("=" * 50)
+
+     try:
+         # Test the help command
+         test_help_command()
+         print()
+
+         # Test the version command
+         test_version_command()
+         print()
+
+         # Test each command's help output
+         test_command_help()
+         print()
+
+         # Test an invalid command
+         test_invalid_command()
+         print()
+
+         print("=" * 50)
+         print("All command smoke tests passed!")
+
+     except Exception as e:
+         print("=" * 50)
+         print(f"Test failed: {e}")
+         import traceback
+         traceback.print_exc()
+         return 1
+
+     return 0
+
+ if __name__ == "__main__":
+     sys.exit(main())