crawlo-1.4.3-py3-none-any.whl → crawlo-1.4.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (107)
  1. crawlo/__init__.py +11 -15
  2. crawlo/__version__.py +1 -1
  3. crawlo/commands/genspider.py +52 -17
  4. crawlo/commands/startproject.py +24 -0
  5. crawlo/core/engine.py +2 -2
  6. crawlo/core/scheduler.py +4 -4
  7. crawlo/crawler.py +13 -6
  8. crawlo/downloader/__init__.py +5 -2
  9. crawlo/extension/__init__.py +2 -2
  10. crawlo/filters/aioredis_filter.py +8 -1
  11. crawlo/filters/memory_filter.py +8 -1
  12. crawlo/initialization/built_in.py +13 -4
  13. crawlo/initialization/core.py +5 -4
  14. crawlo/interfaces.py +24 -0
  15. crawlo/middleware/__init__.py +7 -4
  16. crawlo/middleware/middleware_manager.py +15 -8
  17. crawlo/mode_manager.py +45 -11
  18. crawlo/network/response.py +374 -69
  19. crawlo/pipelines/mysql_pipeline.py +6 -6
  20. crawlo/pipelines/pipeline_manager.py +2 -2
  21. crawlo/project.py +2 -4
  22. crawlo/queue/pqueue.py +2 -6
  23. crawlo/queue/queue_manager.py +1 -2
  24. crawlo/settings/default_settings.py +15 -30
  25. crawlo/task_manager.py +2 -2
  26. crawlo/templates/project/items.py.tmpl +2 -2
  27. crawlo/templates/project/middlewares.py.tmpl +9 -89
  28. crawlo/templates/project/pipelines.py.tmpl +8 -68
  29. crawlo/templates/project/settings.py.tmpl +51 -65
  30. crawlo/templates/project/settings_distributed.py.tmpl +59 -67
  31. crawlo/templates/project/settings_gentle.py.tmpl +45 -40
  32. crawlo/templates/project/settings_high_performance.py.tmpl +45 -40
  33. crawlo/templates/project/settings_minimal.py.tmpl +37 -26
  34. crawlo/templates/project/settings_simple.py.tmpl +45 -40
  35. crawlo/templates/run.py.tmpl +3 -7
  36. crawlo/tools/__init__.py +0 -11
  37. crawlo/utils/__init__.py +17 -1
  38. crawlo/utils/db_helper.py +220 -319
  39. crawlo/utils/error_handler.py +313 -67
  40. crawlo/utils/fingerprint.py +3 -4
  41. crawlo/utils/misc.py +82 -0
  42. crawlo/utils/request.py +55 -66
  43. crawlo/utils/selector_helper.py +138 -0
  44. crawlo/utils/spider_loader.py +185 -45
  45. crawlo/utils/text_helper.py +95 -0
  46. crawlo-1.4.5.dist-info/METADATA +329 -0
  47. {crawlo-1.4.3.dist-info → crawlo-1.4.5.dist-info}/RECORD +89 -68
  48. tests/bug_check_test.py +251 -0
  49. tests/direct_selector_helper_test.py +97 -0
  50. tests/ofweek_scrapy/ofweek_scrapy/items.py +12 -0
  51. tests/ofweek_scrapy/ofweek_scrapy/middlewares.py +100 -0
  52. tests/ofweek_scrapy/ofweek_scrapy/pipelines.py +13 -0
  53. tests/ofweek_scrapy/ofweek_scrapy/settings.py +85 -0
  54. tests/ofweek_scrapy/ofweek_scrapy/spiders/__init__.py +4 -0
  55. tests/ofweek_scrapy/ofweek_scrapy/spiders/ofweek_spider.py +162 -0
  56. tests/ofweek_scrapy/scrapy.cfg +11 -0
  57. tests/performance_comparison.py +4 -5
  58. tests/simple_crawlo_test.py +1 -2
  59. tests/simple_follow_test.py +39 -0
  60. tests/simple_response_selector_test.py +95 -0
  61. tests/simple_selector_helper_test.py +155 -0
  62. tests/simple_selector_test.py +208 -0
  63. tests/simple_url_test.py +74 -0
  64. tests/test_crawler_process_import.py +39 -0
  65. tests/test_crawler_process_spider_modules.py +48 -0
  66. tests/test_edge_cases.py +7 -5
  67. tests/test_encoding_core.py +57 -0
  68. tests/test_encoding_detection.py +127 -0
  69. tests/test_factory_compatibility.py +197 -0
  70. tests/test_multi_directory.py +68 -0
  71. tests/test_multiple_spider_modules.py +81 -0
  72. tests/test_optimized_selector_naming.py +101 -0
  73. tests/test_priority_behavior.py +18 -18
  74. tests/test_response_follow.py +105 -0
  75. tests/test_response_selector_methods.py +93 -0
  76. tests/test_response_url_methods.py +71 -0
  77. tests/test_response_urljoin.py +87 -0
  78. tests/test_scrapy_style_encoding.py +113 -0
  79. tests/test_selector_helper.py +101 -0
  80. tests/test_selector_optimizations.py +147 -0
  81. tests/test_spider_loader.py +50 -0
  82. tests/test_spider_loader_comprehensive.py +70 -0
  83. tests/test_spider_modules.py +85 -0
  84. tests/test_spiders/__init__.py +1 -0
  85. tests/test_spiders/test_spider.py +10 -0
  86. crawlo/tools/anti_crawler.py +0 -269
  87. crawlo/utils/class_loader.py +0 -26
  88. crawlo/utils/enhanced_error_handler.py +0 -357
  89. crawlo-1.4.3.dist-info/METADATA +0 -190
  90. examples/test_project/__init__.py +0 -7
  91. examples/test_project/run.py +0 -35
  92. examples/test_project/test_project/__init__.py +0 -4
  93. examples/test_project/test_project/items.py +0 -18
  94. examples/test_project/test_project/middlewares.py +0 -119
  95. examples/test_project/test_project/pipelines.py +0 -97
  96. examples/test_project/test_project/settings.py +0 -170
  97. examples/test_project/test_project/spiders/__init__.py +0 -10
  98. examples/test_project/test_project/spiders/of_week_dis.py +0 -144
  99. tests/simple_log_test.py +0 -58
  100. tests/simple_test.py +0 -48
  101. tests/test_framework_logger.py +0 -67
  102. tests/test_framework_startup.py +0 -65
  103. tests/test_mode_change.py +0 -73
  104. {crawlo-1.4.3.dist-info → crawlo-1.4.5.dist-info}/WHEEL +0 -0
  105. {crawlo-1.4.3.dist-info → crawlo-1.4.5.dist-info}/entry_points.txt +0 -0
  106. {crawlo-1.4.3.dist-info → crawlo-1.4.5.dist-info}/top_level.txt +0 -0
  107. /tests/{final_command_test_report.md → ofweek_scrapy/ofweek_scrapy/__init__.py} +0 -0
tests/direct_selector_helper_test.py
@@ -0,0 +1,97 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ """
+ Direct test of the selector helper utilities
+ """
+ import sys
+ import os
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
+
+ # Import the helper module directly
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'crawlo', 'utils'))
+ import selector_helper as sh
+ from parsel import Selector
+
+
+ def test_direct_selector_helper():
+     """Directly test the selector helper utilities"""
+     print("Directly testing the selector helper utilities...")
+     print("=" * 50)
+
+     # Build test HTML
+     html_content = """
+     <html>
+     <head>
+         <title>Test Page</title>
+     </head>
+     <body>
+         <div class="content">
+             <h1>Main Title</h1>
+             <p class="intro">Intro paragraph</p>
+             <ul class="list">
+                 <li>Item 1</li>
+                 <li>Item 2</li>
+                 <li>Item 3</li>
+             </ul>
+             <a href="https://example.com" class="link">Link text</a>
+             <img src="image.jpg" alt="Image description" class="image">
+         </div>
+     </body>
+     </html>
+     """
+
+     selector = Selector(text=html_content)
+
+     # Test is_xpath
+     print("1. Testing is_xpath:")
+     print(f"   starts with '/': {sh.is_xpath('/')}")
+     print(f"   starts with '//': {sh.is_xpath('//title')}")
+     print(f"   starts with './': {sh.is_xpath('./div')}")
+     print(f"   starts with 'title': {sh.is_xpath('title')}")
+     print()
+
+     # Test extract_text
+     print("2. Testing extract_text:")
+     title_elements = selector.css('title')
+     title_text = sh.extract_text(title_elements)
+     print(f"   title text: {title_text}")
+
+     h1_elements = selector.css('.content h1')
+     h1_text = sh.extract_text(h1_elements)
+     print(f"   h1 text: {h1_text}")
+     print()
+
+     # Test extract_texts
+     print("3. Testing extract_texts:")
+     li_elements = selector.css('.list li')
+     li_texts = sh.extract_texts(li_elements)
+     print(f"   list item texts: {li_texts}")
+     print()
+
+     # Test extract_attr
+     print("4. Testing extract_attr:")
+     link_elements = selector.css('.link')
+     link_href = sh.extract_attr(link_elements, 'href')
+     print(f"   link href: {link_href}")
+
+     img_elements = selector.css('.image')
+     img_alt = sh.extract_attr(img_elements, 'alt')
+     print(f"   image alt: {img_alt}")
+     print()
+
+     # Test extract_attrs
+     print("5. Testing extract_attrs:")
+     all_links = selector.css('a')
+     all_hrefs = sh.extract_attrs(all_links, 'href')
+     print(f"   all link hrefs: {all_hrefs}")
+
+     all_images = selector.css('img')
+     all_srcs = sh.extract_attrs(all_images, 'src')
+     print(f"   all image srcs: {all_srcs}")
+     print()
+
+     print("All tests completed!")
+
+
+ if __name__ == '__main__':
+     test_direct_selector_helper()
tests/ofweek_scrapy/ofweek_scrapy/items.py
@@ -0,0 +1,12 @@
+ # Define here the models for your scraped items
+ #
+ # See documentation in:
+ # https://docs.scrapy.org/en/latest/topics/items.html
+
+ import scrapy
+
+
+ class OfweekScrapyItem(scrapy.Item):
+     # define the fields for your item here like:
+     # name = scrapy.Field()
+     pass
tests/ofweek_scrapy/ofweek_scrapy/middlewares.py
@@ -0,0 +1,100 @@
+ # Define here the models for your spider middleware
+ #
+ # See documentation in:
+ # https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+ from scrapy import signals
+
+ # useful for handling different item types with a single interface
+ from itemadapter import ItemAdapter
+
+
+ class OfweekScrapySpiderMiddleware:
+     # Not all methods need to be defined. If a method is not defined,
+     # scrapy acts as if the spider middleware does not modify the
+     # passed objects.
+
+     @classmethod
+     def from_crawler(cls, crawler):
+         # This method is used by Scrapy to create your spiders.
+         s = cls()
+         crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+         return s
+
+     def process_spider_input(self, response, spider):
+         # Called for each response that goes through the spider
+         # middleware and into the spider.
+
+         # Should return None or raise an exception.
+         return None
+
+     def process_spider_output(self, response, result, spider):
+         # Called with the results returned from the Spider, after
+         # it has processed the response.
+
+         # Must return an iterable of Request, or item objects.
+         for i in result:
+             yield i
+
+     def process_spider_exception(self, response, exception, spider):
+         # Called when a spider or process_spider_input() method
+         # (from other spider middleware) raises an exception.
+
+         # Should return either None or an iterable of Request or item objects.
+         pass
+
+     async def process_start(self, start):
+         # Called with an async iterator over the spider start() method or the
+         # matching method of an earlier spider middleware.
+         async for item_or_request in start:
+             yield item_or_request
+
+     def spider_opened(self, spider):
+         spider.logger.info("Spider opened: %s" % spider.name)
+
+
+ class OfweekScrapyDownloaderMiddleware:
+     # Not all methods need to be defined. If a method is not defined,
+     # scrapy acts as if the downloader middleware does not modify the
+     # passed objects.
+
+     @classmethod
+     def from_crawler(cls, crawler):
+         # This method is used by Scrapy to create your spiders.
+         s = cls()
+         crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+         return s
+
+     def process_request(self, request, spider):
+         # Called for each request that goes through the downloader
+         # middleware.
+
+         # Must either:
+         # - return None: continue processing this request
+         # - or return a Response object
+         # - or return a Request object
+         # - or raise IgnoreRequest: process_exception() methods of
+         #   installed downloader middleware will be called
+         return None
+
+     def process_response(self, request, response, spider):
+         # Called with the response returned from the downloader.
+
+         # Must either:
+         # - return a Response object
+         # - return a Request object
+         # - or raise IgnoreRequest
+         return response
+
+     def process_exception(self, request, exception, spider):
+         # Called when a download handler or a process_request()
+         # (from other downloader middleware) raises an exception.
+
+         # Must either:
+         # - return None: continue processing this exception
+         # - return a Response object: stops process_exception() chain
+         # - return a Request object: stops process_exception() chain
+         pass
+
+     def spider_opened(self, spider):
+         spider.logger.info("Spider opened: %s" % spider.name)
tests/ofweek_scrapy/ofweek_scrapy/pipelines.py
@@ -0,0 +1,13 @@
+ # Define your item pipelines here
+ #
+ # Don't forget to add your pipeline to the ITEM_PIPELINES setting
+ # See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+
+
+ # useful for handling different item types with a single interface
+ from itemadapter import ItemAdapter
+
+
+ class OfweekScrapyPipeline:
+     def process_item(self, item, spider):
+         return item
tests/ofweek_scrapy/ofweek_scrapy/settings.py
@@ -0,0 +1,85 @@
+ # Scrapy settings for ofweek_scrapy project
+
+ BOT_NAME = 'ofweek_scrapy'
+
+ SPIDER_MODULES = ['ofweek_scrapy.spiders']
+ NEWSPIDER_MODULE = 'ofweek_scrapy.spiders'
+
+ # Obey robots.txt rules
+ ROBOTSTXT_OBEY = False
+
+ # Configure maximum concurrent requests performed by Scrapy (default: 16)
+ # Use the same concurrency as Crawlo
+ CONCURRENT_REQUESTS = 8
+
+ # Configure a delay for requests for the same website (default: 0)
+ # Use the same download delay as Crawlo
+ DOWNLOAD_DELAY = 1.0
+ # The download delay setting will honor only one of:
+ #CONCURRENT_REQUESTS_PER_DOMAIN = 16
+ #CONCURRENT_REQUESTS_PER_IP = 16
+
+ # Disable cookies (enabled by default)
+ #COOKIES_ENABLED = False
+
+ # Disable Telnet Console (enabled by default)
+ #TELNETCONSOLE_ENABLED = False
+
+ # Override the default request headers:
+ #DEFAULT_REQUEST_HEADERS = {
+ #    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+ #    'Accept-Language': 'en',
+ #}
+
+ # Enable or disable spider middlewares
+ # See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+ #SPIDER_MIDDLEWARES = {
+ #    'ofweek_scrapy.middlewares.OfweekScrapySpiderMiddleware': 543,
+ #}
+
+ # Enable or disable downloader middlewares
+ # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+ #DOWNLOADER_MIDDLEWARES = {
+ #    'ofweek_scrapy.middlewares.OfweekScrapyDownloaderMiddleware': 543,
+ #}
+
+ # Enable or disable extensions
+ # See https://docs.scrapy.org/en/latest/topics/extensions.html
+ #EXTENSIONS = {
+ #    'scrapy.extensions.telnet.TelnetConsole': None,
+ #}
+
+ # Configure item pipelines
+ # See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+ #ITEM_PIPELINES = {
+ #    'ofweek_scrapy.pipelines.OfweekScrapyPipeline': 300,
+ #}
+
+ # Enable and configure the AutoThrottle extension (disabled by default)
+ # See https://docs.scrapy.org/en/latest/topics/autothrottle.html
+ #AUTOTHROTTLE_ENABLED = True
+ # The initial download delay
+ #AUTOTHROTTLE_START_DELAY = 5
+ # The maximum download delay to be set in case of high latencies
+ #AUTOTHROTTLE_MAX_DELAY = 60
+ # The average number of requests Scrapy should be sending in parallel to
+ # each remote server
+ #AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
+ # Enable showing throttling stats for every response received:
+ #AUTOTHROTTLE_DEBUG = False
+
+ # Enable and configure HTTP caching (disabled by default)
+ # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+ #HTTPCACHE_ENABLED = True
+ #HTTPCACHE_EXPIRATION_SECS = 0
+ #HTTPCACHE_DIR = 'httpcache'
+ #HTTPCACHE_IGNORE_HTTP_CODES = []
+ #HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
+
+ # Set settings whose default value is deprecated to a future-proof value
+ REQUEST_FINGERPRINTER_IMPLEMENTATION = '2.7'
+ TWISTED_REACTOR = 'twisted.internet.asyncioreactor.AsyncioSelectorReactor'
+ FEED_EXPORT_ENCODING = 'utf-8'
+
+ # Logging configuration
+ LOG_LEVEL = 'INFO'
tests/ofweek_scrapy/ofweek_scrapy/spiders/__init__.py
@@ -0,0 +1,4 @@
+ # This package will contain the spiders of your Scrapy project
+ #
+ # Please refer to the documentation for information on how to create and manage
+ # your spiders.
tests/ofweek_scrapy/ofweek_scrapy/spiders/ofweek_spider.py
@@ -0,0 +1,162 @@
+ import scrapy
+ from urllib.parse import urljoin
+
+ class OfweekSpider(scrapy.Spider):
+     name = 'ofweek'
+     allowed_domains = ['ee.ofweek.com']
+
+     def start_requests(self):
+         """Generate the initial requests"""
+         headers = {
+             "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
+             "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
+             "Cache-Control": "no-cache",
+             "Connection": "keep-alive",
+             "Pragma": "no-cache",
+             "Referer": "https://ee.ofweek.com/CATList-2800-8100-ee-2.html",
+             "Sec-Fetch-Dest": "document",
+             "Sec-Fetch-Mode": "navigate",
+             "Sec-Fetch-Site": "same-origin",
+             "Sec-Fetch-User": "?1",
+             "Upgrade-Insecure-Requests": "1",
+             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
+         }
+         cookies = {
+             "__utmz": "57425525.1730117117.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none)",
+             "Hm_lvt_abe9900db162c6d089cdbfd107db0f03": "1739244841",
+             "Hm_lvt_af50e2fc51af73da7720fb324b88a975": "1740100727",
+             "JSESSIONID": "FEA96D3B5FC31350B2285E711BF2A541",
+             "Hm_lvt_28a416fcfc17063eb9c4f9bb1a1f5cda": "1757477622",
+             "HMACCOUNT": "08DF0D235A291EAA",
+             "__utma": "57425525.2080994505.1730117117.1747970718.1757477622.50",
+             "__utmc": "57425525",
+             "__utmt": "1",
+             "__utmb": "57425525.2.10.1757477622",
+             "Hm_lpvt_28a416fcfc17063eb9c4f9bb1a1f5cda": "1757477628",
+             "index_burying_point": "c64d6c31e69d560efe319cc9f8be279f"
+         }
+
+         # Use the same page count as Crawlo for a fair test
+         max_page = 50  # originally 1851, reduced to 50 pages for testing
+         start_urls = []
+         for page in range(1, max_page + 1):
+             url = f'https://ee.ofweek.com/CATList-2800-8100-ee-{page}.html'
+             start_urls.append(url)
+
+         self.logger.info(f"Generated {len(start_urls)} start URLs")
+
+         # Generate the requests
+         for url in start_urls:
+             self.logger.info(f"Adding start URL: {url}")
+             try:
+                 yield scrapy.Request(
+                     url=url,
+                     callback=self.parse,
+                     headers=headers,
+                     cookies=cookies,
+                     dont_filter=True
+                 )
+             except Exception as e:
+                 self.logger.error(f"Failed to create request: {url}, error: {e}")
+
+         self.logger.info("start_requests finished")
+
+     def parse(self, response):
+         """Parse a list page"""
+         self.logger.info(f'Parsing page: {response.url}')
+
+         # Check the response status
+         if response.status != 200:
+             self.logger.warning(f"Page returned a non-200 status code: {response.status}, URL: {response.url}")
+             return
+
+         # ================== Data extraction ==================
+         try:
+             rows = response.xpath('//div[@class="main_left"]/div[@class="list_model"]/div[@class="model_right model_right2"]')
+             self.logger.info(f"Found {len(rows)} entries on page {response.url}")
+
+             for row in rows:
+                 try:
+                     # Extract the URL and title
+                     url = row.xpath('./h3/a/@href').extract_first()
+                     title = row.xpath('./h3/a/text()').extract_first()
+
+                     # Fault tolerance
+                     if not url:
+                         self.logger.warning(f"Entry has no URL, skipping")
+                         continue
+
+                     if not title:
+                         self.logger.warning(f"Entry has no title, skipping")
+                         continue
+
+                     # Make sure the URL is absolute
+                     absolute_url = response.urljoin(url)
+
+                     # Validate the URL format
+                     if not absolute_url.startswith(('http://', 'https://')):
+                         self.logger.warning(f"Invalid URL format, skipping: {absolute_url}")
+                         continue
+
+                     self.logger.info(f"Extracted detail page link: {absolute_url}, title: {title}")
+                     yield scrapy.Request(
+                         url=absolute_url,
+                         meta={
+                             "title": title.strip() if title else '',
+                             "parent_url": response.url
+                         },
+                         callback=self.parse_detail
+                     )
+                 except Exception as e:
+                     self.logger.error(f"Error while processing entry: {e}")
+                     continue
+
+         except Exception as e:
+             self.logger.error(f"Error while parsing page {response.url}: {e}")
+
+     def parse_detail(self, response):
+         """Parse a detail page"""
+         self.logger.info(f'Parsing detail page: {response.url}')
+
+         # Check the response status
+         if response.status != 200:
+             self.logger.warning(f"Detail page returned a non-200 status code: {response.status}, URL: {response.url}")
+             return
+
+         try:
+             title = response.meta.get('title', '')
+
+             # Extract the content, with fault tolerance
+             content_elements = response.xpath('//div[@class="TRS_Editor"]|//*[@id="articleC"]')
+             if content_elements:
+                 content = content_elements.xpath('.//text()').extract()
+                 content = '\n'.join([text.strip() for text in content if text.strip()])
+             else:
+                 content = ''
+                 self.logger.warning(f"No content area found: {response.url}")
+
+             # Extract the publish time
+             publish_time = response.xpath('//div[@class="time fl"]/text()').extract_first()
+             if publish_time:
+                 publish_time = publish_time.strip()
+
+             source = response.xpath('//div[@class="source-name"]/text()').extract_first()
+
+             # Build the item
+             item = {
+                 'title': title.strip() if title else '',
+                 'publish_time': publish_time if publish_time else '',
+                 'url': response.url,
+                 'source': source if source else '',
+                 'content': content
+             }
+
+             # Validate required fields
+             if not item['title']:
+                 self.logger.warning(f"Detail page has no title: {response.url}")
+
+             self.logger.info(f"Successfully extracted detail page data: {item['title']}")
+             yield item
+
+         except Exception as e:
+             self.logger.error(f"Error while parsing detail page {response.url}: {e}")
tests/ofweek_scrapy/scrapy.cfg
@@ -0,0 +1,11 @@
+ # Automatically created by: scrapy startproject
+ #
+ # For more information about the [deploy] section see:
+ # https://scrapyd.readthedocs.io/en/latest/deploy.html
+
+ [settings]
+ default = ofweek_scrapy.settings
+
+ [deploy]
+ #url = http://localhost:6800/
+ project = ofweek_scrapy
tests/performance_comparison.py
@@ -79,7 +79,6 @@ class OfweekPerformanceSpider(Spider):
              yield Request(url=url, callback=self.parse, headers=headers, cookies=cookies)

      def parse(self, response):
-         from urllib.parse import urljoin
          rows = response.xpath("//div[@class=\"main_left\"]/div[@class=\"list_model\"]/div[@class=\"model_right model_right2\"]")

          for row in rows:
@@ -90,7 +89,7 @@ class OfweekPerformanceSpider(Spider):
              if not url or not title:
                  continue

-             absolute_url = urljoin(response.url, url)
+             absolute_url = response.urljoin(url)
              if not absolute_url.startswith(("http://", "https://")):
                  continue

@@ -171,11 +170,11 @@ def run_scrapy_test():
      # Run the Scrapy test
      result = subprocess.run([
          'scrapy', 'runspider',
-         '/Users/oscar/projects/Crawlo/tests/scrapy_comparison/ofweek_scrapy.py',
+         'D:/dowell/projects/Crawlo/tests/scrapy_comparison/ofweek_scrapy.py',
          '-s', 'CONCURRENT_REQUESTS=8',
          '-s', 'DOWNLOAD_DELAY=0.1',
          '-s', 'LOG_LEVEL=ERROR'
-     ], capture_output=True, text=True, timeout=300, cwd='/Users/oscar/projects/Crawlo')
+     ], capture_output=True, text=True, timeout=300, cwd='D:\dowell\projects\Crawlo')

      end_time = time.time()
      execution_time = end_time - start_time
@@ -205,7 +204,7 @@ def main():
      print("=== Crawlo vs Scrapy performance comparison ===")

      # Create the test directory
-     os.makedirs('/Users/oscar/projects/Crawlo/tests/scrapy_comparison', exist_ok=True)
+     os.makedirs(os.path.join('D:\dowell\projects\Crawlo', 'tests', 'scrapy_comparison'), exist_ok=True)

      # Run the tests
      crawlo_time, crawlo_pages = run_crawlo_test()
tests/simple_crawlo_test.py
@@ -62,7 +62,6 @@ class OfweekSimpleSpider(Spider):
              yield Request(url=url, callback=self.parse, headers=headers, cookies=cookies)

      def parse(self, response):
-         from urllib.parse import urljoin
          rows = response.xpath("//div[@class=\"main_left\"]/div[@class=\"list_model\"]/div[@class=\"model_right model_right2\"]")

          for row in rows:
@@ -73,7 +72,7 @@ class OfweekSimpleSpider(Spider):
              if not url or not title:
                  continue

-             absolute_url = urljoin(response.url, url)
+             absolute_url = response.urljoin(url)
              if not absolute_url.startswith(("http://", "https://")):
                  continue

tests/simple_follow_test.py
@@ -0,0 +1,39 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ """
+ Simple test of the Response.follow method
+ """
+ import sys
+ import os
+
+ # Add the project root to the Python path
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
+
+ # Test the urljoin function directly
+ from urllib.parse import urljoin
+
+
+ def test_urljoin():
+     """Test the urljoin function"""
+     base_url = "https://example.com/test"
+
+     # Test an absolute URL
+     absolute_url = urljoin(base_url, "https://other.com/page")
+     print(f"Absolute URL: {absolute_url}")
+     assert absolute_url == "https://other.com/page"
+
+     # Test a relative URL
+     relative_url = urljoin(base_url, "/relative/path")
+     print(f"Relative URL: {relative_url}")
+     assert relative_url == "https://example.com/relative/path"
+
+     # Test a more complex relative URL
+     complex_url = urljoin(base_url, "../other/path")
+     print(f"Complex relative URL: {complex_url}")
+     assert complex_url == "https://example.com/other/path"
+
+     print("All tests passed!")
+
+
+ if __name__ == '__main__':
+     test_urljoin()
tests/simple_response_selector_test.py
@@ -0,0 +1,95 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ """
+ Simplified test of the selector methods on the Response class
+ """
+ import sys
+ import os
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
+
+ # Import the Response class directly
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'crawlo', 'network'))
+ from response import Response
+
+
+ def test_response_selector_methods():
+     """Test the selector methods on the Response class"""
+     print("Testing the selector methods on the Response class...")
+     print("=" * 50)
+
+     # Build a test HTML response
+     html_content = """
+     <html>
+     <head>
+         <title>Test Page</title>
+     </head>
+     <body>
+         <div class="content">
+             <h1>Main Title</h1>
+             <p class="intro">Intro paragraph</p>
+             <ul class="list">
+                 <li>Item 1</li>
+                 <li>Item 2</li>
+                 <li>Item 3</li>
+             </ul>
+             <a href="https://example.com" class="link">Link text</a>
+             <img src="image.jpg" alt="Image description" class="image">
+         </div>
+     </body>
+     </html>
+     """
+
+     # Create the Response object
+     response = Response(
+         url="https://example.com/test",
+         body=html_content.encode('utf-8'),
+         headers={"content-type": "text/html; charset=utf-8"}
+     )
+
+     # Test extract_text (CSS selector)
+     print("1. Testing extract_text (CSS selector):")
+     title_text = response.extract_text('title')
+     print(f"   title text: {title_text}")
+
+     h1_text = response.extract_text('.content h1')
+     print(f"   h1 text: {h1_text}")
+     print()
+
+     # Test extract_text (XPath selector)
+     print("2. Testing extract_text (XPath selector):")
+     title_text_xpath = response.extract_text('//title')
+     print(f"   title text: {title_text_xpath}")
+
+     h1_text_xpath = response.extract_text('//div[@class="content"]/h1')
+     print(f"   h1 text: {h1_text_xpath}")
+     print()
+
+     # Test extract_texts
+     print("3. Testing extract_texts:")
+     li_texts = response.extract_texts('.list li')
+     print(f"   list item texts: {li_texts}")
+     print()
+
+     # Test extract_attr
+     print("4. Testing extract_attr:")
+     link_href = response.extract_attr('.link', 'href')
+     print(f"   link href: {link_href}")
+
+     img_alt = response.extract_attr('.image', 'alt')
+     print(f"   image alt: {img_alt}")
+     print()
+
+     # Test extract_attrs
+     print("5. Testing extract_attrs:")
+     all_links = response.extract_attrs('a', 'href')
+     print(f"   all link hrefs: {all_links}")
+
+     all_images = response.extract_attrs('img', 'src')
+     print(f"   all image srcs: {all_images}")
+     print()
+
+     print("All tests completed!")
+
+
+ if __name__ == '__main__':
+     test_response_selector_methods()