crawlo 1.4.4-py3-none-any.whl → 1.4.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (85)
  1. crawlo/__init__.py +11 -15
  2. crawlo/__version__.py +1 -1
  3. crawlo/commands/startproject.py +24 -0
  4. crawlo/core/engine.py +2 -2
  5. crawlo/core/scheduler.py +4 -4
  6. crawlo/crawler.py +8 -7
  7. crawlo/downloader/__init__.py +5 -2
  8. crawlo/extension/__init__.py +2 -2
  9. crawlo/filters/aioredis_filter.py +8 -1
  10. crawlo/filters/memory_filter.py +8 -1
  11. crawlo/initialization/built_in.py +13 -4
  12. crawlo/initialization/core.py +5 -4
  13. crawlo/interfaces.py +24 -0
  14. crawlo/middleware/__init__.py +7 -4
  15. crawlo/middleware/middleware_manager.py +15 -8
  16. crawlo/mode_manager.py +45 -11
  17. crawlo/network/response.py +374 -69
  18. crawlo/pipelines/mysql_pipeline.py +6 -6
  19. crawlo/pipelines/pipeline_manager.py +2 -2
  20. crawlo/project.py +2 -4
  21. crawlo/settings/default_settings.py +4 -0
  22. crawlo/task_manager.py +2 -2
  23. crawlo/templates/project/items.py.tmpl +2 -2
  24. crawlo/templates/project/middlewares.py.tmpl +9 -89
  25. crawlo/templates/project/pipelines.py.tmpl +8 -68
  26. crawlo/tools/__init__.py +0 -11
  27. crawlo/utils/__init__.py +17 -1
  28. crawlo/utils/db_helper.py +220 -319
  29. crawlo/utils/error_handler.py +313 -67
  30. crawlo/utils/fingerprint.py +3 -4
  31. crawlo/utils/misc.py +82 -0
  32. crawlo/utils/request.py +55 -66
  33. crawlo/utils/selector_helper.py +138 -0
  34. crawlo/utils/spider_loader.py +185 -45
  35. crawlo/utils/text_helper.py +95 -0
  36. crawlo-1.4.5.dist-info/METADATA +329 -0
  37. {crawlo-1.4.4.dist-info → crawlo-1.4.5.dist-info}/RECORD +76 -49
  38. tests/bug_check_test.py +251 -0
  39. tests/direct_selector_helper_test.py +97 -0
  40. tests/ofweek_scrapy/ofweek_scrapy/items.py +12 -0
  41. tests/ofweek_scrapy/ofweek_scrapy/middlewares.py +100 -0
  42. tests/ofweek_scrapy/ofweek_scrapy/pipelines.py +13 -0
  43. tests/ofweek_scrapy/ofweek_scrapy/settings.py +85 -0
  44. tests/ofweek_scrapy/ofweek_scrapy/spiders/__init__.py +4 -0
  45. tests/ofweek_scrapy/ofweek_scrapy/spiders/ofweek_spider.py +162 -0
  46. tests/ofweek_scrapy/scrapy.cfg +11 -0
  47. tests/performance_comparison.py +4 -5
  48. tests/simple_crawlo_test.py +1 -2
  49. tests/simple_follow_test.py +39 -0
  50. tests/simple_response_selector_test.py +95 -0
  51. tests/simple_selector_helper_test.py +155 -0
  52. tests/simple_selector_test.py +208 -0
  53. tests/simple_url_test.py +74 -0
  54. tests/test_crawler_process_import.py +39 -0
  55. tests/test_crawler_process_spider_modules.py +48 -0
  56. tests/test_edge_cases.py +7 -5
  57. tests/test_encoding_core.py +57 -0
  58. tests/test_encoding_detection.py +127 -0
  59. tests/test_factory_compatibility.py +197 -0
  60. tests/test_optimized_selector_naming.py +101 -0
  61. tests/test_priority_behavior.py +18 -18
  62. tests/test_response_follow.py +105 -0
  63. tests/test_response_selector_methods.py +93 -0
  64. tests/test_response_url_methods.py +71 -0
  65. tests/test_response_urljoin.py +87 -0
  66. tests/test_scrapy_style_encoding.py +113 -0
  67. tests/test_selector_helper.py +101 -0
  68. tests/test_selector_optimizations.py +147 -0
  69. tests/test_spider_loader.py +50 -0
  70. tests/test_spider_loader_comprehensive.py +70 -0
  71. tests/test_spiders/__init__.py +1 -0
  72. tests/test_spiders/test_spider.py +10 -0
  73. crawlo/tools/anti_crawler.py +0 -269
  74. crawlo/utils/class_loader.py +0 -26
  75. crawlo/utils/enhanced_error_handler.py +0 -357
  76. crawlo-1.4.4.dist-info/METADATA +0 -190
  77. tests/simple_log_test.py +0 -58
  78. tests/simple_test.py +0 -48
  79. tests/test_framework_logger.py +0 -67
  80. tests/test_framework_startup.py +0 -65
  81. tests/test_mode_change.py +0 -73
  82. {crawlo-1.4.4.dist-info → crawlo-1.4.5.dist-info}/WHEEL +0 -0
  83. {crawlo-1.4.4.dist-info → crawlo-1.4.5.dist-info}/entry_points.txt +0 -0
  84. {crawlo-1.4.4.dist-info → crawlo-1.4.5.dist-info}/top_level.txt +0 -0
  85. /tests/{final_command_test_report.md → ofweek_scrapy/ofweek_scrapy/__init__.py} +0 -0
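
The largest single change is the rewrite of crawlo/network/response.py (+374 -69), backed by the new URL and selector tests below (test_response_urljoin.py, test_response_follow.py, test_selector_helper.py, and the bug-check script). As a point of reference for what those tests exercise, here is a minimal sketch of the urljoin semantics using only the standard library; crawlo's Response.urljoin presumably resolves relative URLs against response.url this way, but that is an assumption, not a reading of the implementation:

```python
# Hedged sketch: stdlib urljoin semantics that the new Response URL tests
# appear to target. The base URL and paths here are illustrative only.
from urllib.parse import urljoin

base = "https://example.com/news/page1.html"
assert urljoin(base, "/test") == "https://example.com/test"                   # absolute path
assert urljoin(base, "page2.html") == "https://example.com/news/page2.html"   # relative path
```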
@@ -0,0 +1,251 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+ """
+ Framework bug-check test script.
+ Comprehensively tests the framework's core functionality, looking for potential bugs.
+ """
+
+ import sys
+ import os
+ import asyncio
+ import traceback
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
+
+ from crawlo.spider import Spider
+ from crawlo import Request
+ from crawlo.network.response import Response
+
+
+ class TestSpider(Spider):
+     """Test spider."""
+     name = 'bug_check_spider'
+
+     def start_requests(self):
+         """Issue the test requests."""
+         # Generate a few test requests
+         yield Request('https://httpbin.org/get', callback=self.parse)
+
+     def parse(self, response):
+         """Parse the response."""
+         print(f"Fetched response: {response.url}")
+         print(f"Status code: {response.status_code}")
+         # Exercise the various Response methods
+         self.test_response_methods(response)
+         return []
+
+     def test_response_methods(self, response):
+         """Exercise the various Response methods."""
+         print("Testing Response methods...")
+
+         # URL-handling methods
+         try:
+             joined_url = response.urljoin('/test')
+             print(f"  urljoin passed: {joined_url}")
+         except Exception as e:
+             print(f"  urljoin failed: {e}")
+
+         try:
+             parsed = response.urlparse()
+             print(f"  urlparse passed: {parsed}")
+         except Exception as e:
+             print(f"  urlparse failed: {e}")
+
+         # Selector methods
+         try:
+             # Extract a single text node
+             text = response.extract_text('title', default='No title')
+             print(f"  extract_text passed: {text}")
+         except Exception as e:
+             print(f"  extract_text failed: {e}")
+
+         try:
+             # Extract multiple text nodes
+             texts = response.extract_texts('h1,h2,h3', default=[])
+             print(f"  extract_texts passed: {texts}")
+         except Exception as e:
+             print(f"  extract_texts failed: {e}")
+
+         try:
+             # Extract a single attribute
+             attr = response.extract_attr('a', 'href', default='')
+             print(f"  extract_attr passed: {attr}")
+         except Exception as e:
+             print(f"  extract_attr failed: {e}")
+
+         try:
+             # Extract multiple attributes
+             attrs = response.extract_attrs('a', 'href', default=[])
+             print(f"  extract_attrs passed: {attrs}")
+         except Exception as e:
+             print(f"  extract_attrs failed: {e}")
+
+
+ async def test_request_serialization():
+     """Test Request serialization."""
+     print("Testing Request serialization...")
+
+     try:
+         # Build a reasonably complex Request object
+         request = Request(
+             url='https://example.com/test',
+             method='POST',
+             headers={'User-Agent': 'Test'},
+             json_body={'key': 'value'},
+             meta={'test': 'data'},
+             priority=5
+         )
+
+         # Round-trip it through pickle
+         import pickle
+         serialized = pickle.dumps(request)
+         deserialized = pickle.loads(serialized)
+         print(f"  Request serialization passed: {deserialized.url}")
+         return True
+     except Exception as e:
+         print(f"  Request serialization failed: {e}")
+         traceback.print_exc()
+         return False
+
+
+ async def test_queue_operations():
+     """Test queue operations."""
+     print("Testing queue operations...")
+
+     try:
+         from crawlo.queue.queue_manager import QueueConfig, QueueManager, QueueType
+
+         # In-memory queue configuration
+         queue_config = QueueConfig(
+             queue_type=QueueType.MEMORY,
+             max_queue_size=10
+         )
+
+         # Create the queue manager
+         queue_manager = QueueManager(queue_config)
+         await queue_manager.initialize()
+
+         # Enqueue a request
+         request = Request('https://example.com/test')
+         success = await queue_manager.put(request)
+         print(f"  enqueue request: {'ok' if success else 'failed'}")
+
+         # Dequeue a request
+         retrieved = await queue_manager.get(timeout=1.0)
+         print(f"  dequeue request: {'ok' if retrieved else 'failed'}")
+
+         # Close the queue
+         await queue_manager.close()
+         print("  queue operations test finished")
+         return True
+     except Exception as e:
+         print(f"  queue operations test failed: {e}")
+         traceback.print_exc()
+         return False
+
+
+ async def test_redis_queue_operations():
+     """Test Redis queue operations."""
+     print("Testing Redis queue operations...")
+
+     try:
+         from crawlo.queue.queue_manager import QueueConfig, QueueManager, QueueType
+
+         # Redis queue configuration
+         queue_config = QueueConfig(
+             queue_type=QueueType.REDIS,
+             redis_url='redis://127.0.0.1:6379/15',  # use the test database
+             queue_name='test:bug_check',
+             max_queue_size=10
+         )
+
+         # Create the queue manager
+         queue_manager = QueueManager(queue_config)
+         initialized = await queue_manager.initialize()
+         if not initialized:
+             print("  Redis queue initialization failed, skipping test")
+             return True
+
+         # Enqueue a request
+         request = Request('https://example.com/test_redis')
+         success = await queue_manager.put(request)
+         print(f"  Redis enqueue request: {'ok' if success else 'failed'}")
+
+         # Dequeue a request
+         retrieved = await queue_manager.get(timeout=1.0)
+         print(f"  Redis dequeue request: {'ok' if retrieved else 'failed'}")
+
+         # Close the queue
+         await queue_manager.close()
+         print("  Redis queue operations test finished")
+         return True
+     except Exception as e:
+         print(f"  Redis queue operations test failed: {e}")
+         # A Redis connection failure is not treated as a test failure
+         return True
+
+
+ async def test_spider_registration():
+     """Test spider registration."""
+     print("Testing spider registration...")
+
+     try:
+         from crawlo.spider import get_spider_by_name, get_spider_names
+
+         # Check whether the test spider has been registered
+         spider_class = get_spider_by_name('bug_check_spider')
+         if spider_class:
+             print(f"  spider registration passed: {spider_class.__name__}")
+         else:
+             print("  spider registration failed: registered spider not found")
+             return False
+
+         # List all registered spiders
+         spider_names = get_spider_names()
+         print(f"  registered spiders: {spider_names}")
+         return True
+     except Exception as e:
+         print(f"  spider registration test failed: {e}")
+         traceback.print_exc()
+         return False
+
+
+ async def main():
+     """Entry point."""
+     print("Starting framework bug-check tests...")
+     print("=" * 50)
+
+     tests = [
+         test_request_serialization,
+         test_queue_operations,
+         test_redis_queue_operations,
+         test_spider_registration,
+     ]
+
+     passed = 0
+     total = len(tests)
+
+     for test_func in tests:
+         try:
+             if await test_func():
+                 passed += 1
+                 print(f"✓ {test_func.__name__} passed")
+             else:
+                 print(f"✗ {test_func.__name__} failed")
+         except Exception as e:
+             print(f"✗ {test_func.__name__} raised: {e}")
+         print()
+
+     print("=" * 50)
+     print(f"Result: {passed}/{total} passed")
+
+     if passed == total:
+         print("All tests passed! Core framework functionality looks healthy.")
+         return 0
+     else:
+         print("Some tests failed; please check the framework implementation.")
+         return 1
+
+
+ if __name__ == "__main__":
+     exit_code = asyncio.run(main())
+     exit(exit_code)
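
The bug-check script above leans on a small convenience layer over the response (extract_text, extract_texts, extract_attr, extract_attrs, each taking a default= fallback). Below is a minimal sketch of that behaviour built directly on parsel; the names mirror the test, but the real implementation lives in crawlo/utils/selector_helper.py and crawlo/network/response.py and may differ in detail:

```python
# Hedged sketch of the default-fallback extraction helpers the test exercises.
from parsel import Selector

def extract_text(sel, query, default=""):
    # First matching element's text via a CSS query, or the default.
    value = sel.css(f"{query}::text").get()
    return value.strip() if value else default

def extract_attr(sel, query, attr, default=""):
    # First matching element's attribute via a CSS query, or the default.
    value = sel.css(f"{query}::attr({attr})").get()
    return value if value is not None else default

sel = Selector(text="<title>Demo</title><a href='/x'>x</a>")
assert extract_text(sel, "title") == "Demo"
assert extract_text(sel, "h1", default="No title") == "No title"
assert extract_attr(sel, "a", "href") == "/x"
```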
@@ -0,0 +1,97 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ """
+ Directly test the selector helper utilities.
+ """
+ import sys
+ import os
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
+
+ # Import the utility module directly
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'crawlo', 'utils'))
+ import selector_helper as sh
+ from parsel import Selector
+
+
+ def test_direct_selector_helper():
+     """Directly test the selector helper utilities."""
+     print("Testing the selector helper directly...")
+     print("=" * 50)
+
+     # Build the test HTML
+     html_content = """
+     <html>
+     <head>
+         <title>Test page</title>
+     </head>
+     <body>
+         <div class="content">
+             <h1>Main heading</h1>
+             <p class="intro">Intro paragraph</p>
+             <ul class="list">
+                 <li>Item 1</li>
+                 <li>Item 2</li>
+                 <li>Item 3</li>
+             </ul>
+             <a href="https://example.com" class="link">Link text</a>
+             <img src="image.jpg" alt="Image description" class="image">
+         </div>
+     </body>
+     </html>
+     """
+
+     selector = Selector(text=html_content)
+
+     # Test is_xpath
+     print("1. is_xpath:")
+     print(f"   starts with '/': {sh.is_xpath('/')}")
+     print(f"   starts with '//': {sh.is_xpath('//title')}")
+     print(f"   starts with './': {sh.is_xpath('./div')}")
+     print(f"   plain 'title': {sh.is_xpath('title')}")
+     print()
+
+     # Test extract_text
+     print("2. extract_text:")
+     title_elements = selector.css('title')
+     title_text = sh.extract_text(title_elements)
+     print(f"   title text: {title_text}")
+
+     h1_elements = selector.css('.content h1')
+     h1_text = sh.extract_text(h1_elements)
+     print(f"   h1 text: {h1_text}")
+     print()
+
+     # Test extract_texts
+     print("3. extract_texts:")
+     li_elements = selector.css('.list li')
+     li_texts = sh.extract_texts(li_elements)
+     print(f"   list item texts: {li_texts}")
+     print()
+
+     # Test extract_attr
+     print("4. extract_attr:")
+     link_elements = selector.css('.link')
+     link_href = sh.extract_attr(link_elements, 'href')
+     print(f"   link href: {link_href}")
+
+     img_elements = selector.css('.image')
+     img_alt = sh.extract_attr(img_elements, 'alt')
+     print(f"   image alt: {img_alt}")
+     print()
+
+     # Test extract_attrs
+     print("5. extract_attrs:")
+     all_links = selector.css('a')
+     all_hrefs = sh.extract_attrs(all_links, 'href')
+     print(f"   all link hrefs: {all_hrefs}")
+
+     all_images = selector.css('img')
+     all_srcs = sh.extract_attrs(all_images, 'src')
+     print(f"   all image srcs: {all_srcs}")
+     print()
+
+     print("All tests finished!")
+
+
+ if __name__ == '__main__':
+     test_direct_selector_helper()
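
The is_xpath checks at the top of this test suggest the helper dispatches a single query string to either XPath or CSS based on its prefix. A plausible sketch of that dispatch is shown below, under the assumption that '/'-style prefixes mean XPath; the actual rules in crawlo/utils/selector_helper.py may be broader:

```python
# Hedged sketch: prefix-based XPath/CSS dispatch matching the probes above.
from parsel import Selector

def is_xpath(query: str) -> bool:
    # '/', '//' and './' prefixes are treated as XPath, per the test's probes.
    return query.startswith(('/', './'))

def select(sel: Selector, query: str):
    return sel.xpath(query) if is_xpath(query) else sel.css(query)

sel = Selector(text="<div><p>hi</p></div>")
assert select(sel, "//p").get() == select(sel, "p").get()
```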
@@ -0,0 +1,12 @@
+ # Define here the models for your scraped items
+ #
+ # See documentation in:
+ # https://docs.scrapy.org/en/latest/topics/items.html
+
+ import scrapy
+
+
+ class OfweekScrapyItem(scrapy.Item):
+     # define the fields for your item here like:
+     # name = scrapy.Field()
+     pass
@@ -0,0 +1,100 @@
+ # Define here the models for your spider middleware
+ #
+ # See documentation in:
+ # https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+ from scrapy import signals
+
+ # useful for handling different item types with a single interface
+ from itemadapter import ItemAdapter
+
+
+ class OfweekScrapySpiderMiddleware:
+     # Not all methods need to be defined. If a method is not defined,
+     # scrapy acts as if the spider middleware does not modify the
+     # passed objects.
+
+     @classmethod
+     def from_crawler(cls, crawler):
+         # This method is used by Scrapy to create your spiders.
+         s = cls()
+         crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+         return s
+
+     def process_spider_input(self, response, spider):
+         # Called for each response that goes through the spider
+         # middleware and into the spider.
+
+         # Should return None or raise an exception.
+         return None
+
+     def process_spider_output(self, response, result, spider):
+         # Called with the results returned from the Spider, after
+         # it has processed the response.
+
+         # Must return an iterable of Request, or item objects.
+         for i in result:
+             yield i
+
+     def process_spider_exception(self, response, exception, spider):
+         # Called when a spider or process_spider_input() method
+         # (from other spider middleware) raises an exception.
+
+         # Should return either None or an iterable of Request or item objects.
+         pass
+
+     async def process_start(self, start):
+         # Called with an async iterator over the spider start() method or the
+         # matching method of an earlier spider middleware.
+         async for item_or_request in start:
+             yield item_or_request
+
+     def spider_opened(self, spider):
+         spider.logger.info("Spider opened: %s" % spider.name)
+
+
+ class OfweekScrapyDownloaderMiddleware:
+     # Not all methods need to be defined. If a method is not defined,
+     # scrapy acts as if the downloader middleware does not modify the
+     # passed objects.
+
+     @classmethod
+     def from_crawler(cls, crawler):
+         # This method is used by Scrapy to create your spiders.
+         s = cls()
+         crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+         return s
+
+     def process_request(self, request, spider):
+         # Called for each request that goes through the downloader
+         # middleware.
+
+         # Must either:
+         # - return None: continue processing this request
+         # - or return a Response object
+         # - or return a Request object
+         # - or raise IgnoreRequest: process_exception() methods of
+         #   installed downloader middleware will be called
+         return None
+
+     def process_response(self, request, response, spider):
+         # Called with the response returned from the downloader.
+
+         # Must either:
+         # - return a Response object
+         # - return a Request object
+         # - or raise IgnoreRequest
+         return response
+
+     def process_exception(self, request, exception, spider):
+         # Called when a download handler or a process_request()
+         # (from other downloader middleware) raises an exception.
+
+         # Must either:
+         # - return None: continue processing this exception
+         # - return a Response object: stops process_exception() chain
+         # - return a Request object: stops process_exception() chain
+         pass
+
+     def spider_opened(self, spider):
+         spider.logger.info("Spider opened: %s" % spider.name)
@@ -0,0 +1,13 @@
+ # Define your item pipelines here
+ #
+ # Don't forget to add your pipeline to the ITEM_PIPELINES setting
+ # See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+
+
+ # useful for handling different item types with a single interface
+ from itemadapter import ItemAdapter
+
+
+ class OfweekScrapyPipeline:
+     def process_item(self, item, spider):
+         return item
@@ -0,0 +1,85 @@
+ # Scrapy settings for ofweek_scrapy project
+
+ BOT_NAME = 'ofweek_scrapy'
+
+ SPIDER_MODULES = ['ofweek_scrapy.spiders']
+ NEWSPIDER_MODULE = 'ofweek_scrapy.spiders'
+
+ # Obey robots.txt rules
+ ROBOTSTXT_OBEY = False
+
+ # Configure maximum concurrent requests performed by Scrapy (default: 16)
+ # Use the same concurrency as Crawlo
+ CONCURRENT_REQUESTS = 8
+
+ # Configure a delay for requests for the same website (default: 0)
+ # Use the same download delay as Crawlo
+ DOWNLOAD_DELAY = 1.0
+ # The download delay setting will honor only one of:
+ #CONCURRENT_REQUESTS_PER_DOMAIN = 16
+ #CONCURRENT_REQUESTS_PER_IP = 16
+
+ # Disable cookies (enabled by default)
+ #COOKIES_ENABLED = False
+
+ # Disable Telnet Console (enabled by default)
+ #TELNETCONSOLE_ENABLED = False
+
+ # Override the default request headers:
+ #DEFAULT_REQUEST_HEADERS = {
+ #    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+ #    'Accept-Language': 'en',
+ #}
+
+ # Enable or disable spider middlewares
+ # See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+ #SPIDER_MIDDLEWARES = {
+ #    'ofweek_scrapy.middlewares.OfweekScrapySpiderMiddleware': 543,
+ #}
+
+ # Enable or disable downloader middlewares
+ # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+ #DOWNLOADER_MIDDLEWARES = {
+ #    'ofweek_scrapy.middlewares.OfweekScrapyDownloaderMiddleware': 543,
+ #}
+
+ # Enable or disable extensions
+ # See https://docs.scrapy.org/en/latest/topics/extensions.html
+ #EXTENSIONS = {
+ #    'scrapy.extensions.telnet.TelnetConsole': None,
+ #}
+
+ # Configure item pipelines
+ # See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+ #ITEM_PIPELINES = {
+ #    'ofweek_scrapy.pipelines.OfweekScrapyPipeline': 300,
+ #}
+
+ # Enable and configure the AutoThrottle extension (disabled by default)
+ # See https://docs.scrapy.org/en/latest/topics/autothrottle.html
+ #AUTOTHROTTLE_ENABLED = True
+ # The initial download delay
+ #AUTOTHROTTLE_START_DELAY = 5
+ # The maximum download delay to be set in case of high latencies
+ #AUTOTHROTTLE_MAX_DELAY = 60
+ # The average number of requests Scrapy should be sending in parallel to
+ # each remote server
+ #AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
+ # Enable showing throttling stats for every response received:
+ #AUTOTHROTTLE_DEBUG = False
+
+ # Enable and configure HTTP caching (disabled by default)
+ # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+ #HTTPCACHE_ENABLED = True
+ #HTTPCACHE_EXPIRATION_SECS = 0
+ #HTTPCACHE_DIR = 'httpcache'
+ #HTTPCACHE_IGNORE_HTTP_CODES = []
+ #HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
+
+ # Set settings whose default value is deprecated to a future-proof value
+ REQUEST_FINGERPRINTER_IMPLEMENTATION = '2.7'
+ TWISTED_REACTOR = 'twisted.internet.asyncioreactor.AsyncioSelectorReactor'
+ FEED_EXPORT_ENCODING = 'utf-8'
+
+ # Logging configuration
+ LOG_LEVEL = 'INFO'
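
These settings pin the Scrapy comparison project to the same concurrency (8) and download delay (1.0 s) as the Crawlo side of tests/performance_comparison.py. To run the Scrapy half programmatically, the standard CrawlerProcess pattern should work; note the spider name 'ofweek_spider' is assumed from the file name and must match the spider's name attribute:

```python
# Hedged sketch: run the comparison spider with the settings above.
# Assumes execution from tests/ofweek_scrapy/ so scrapy.cfg is found.
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

process = CrawlerProcess(get_project_settings())  # loads settings.py
process.crawl("ofweek_spider")  # assumed spider name
process.start()  # blocks until the crawl finishes
```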
@@ -0,0 +1,4 @@
+ # This package will contain the spiders of your Scrapy project
+ #
+ # Please refer to the documentation for information on how to create and manage
+ # your spiders.