crawlo-1.4.3-py3-none-any.whl → crawlo-1.4.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.
Files changed (107)
  1. crawlo/__init__.py +11 -15
  2. crawlo/__version__.py +1 -1
  3. crawlo/commands/genspider.py +52 -17
  4. crawlo/commands/startproject.py +24 -0
  5. crawlo/core/engine.py +2 -2
  6. crawlo/core/scheduler.py +4 -4
  7. crawlo/crawler.py +13 -6
  8. crawlo/downloader/__init__.py +5 -2
  9. crawlo/extension/__init__.py +2 -2
  10. crawlo/filters/aioredis_filter.py +8 -1
  11. crawlo/filters/memory_filter.py +8 -1
  12. crawlo/initialization/built_in.py +13 -4
  13. crawlo/initialization/core.py +5 -4
  14. crawlo/interfaces.py +24 -0
  15. crawlo/middleware/__init__.py +7 -4
  16. crawlo/middleware/middleware_manager.py +15 -8
  17. crawlo/mode_manager.py +45 -11
  18. crawlo/network/response.py +374 -69
  19. crawlo/pipelines/mysql_pipeline.py +6 -6
  20. crawlo/pipelines/pipeline_manager.py +2 -2
  21. crawlo/project.py +2 -4
  22. crawlo/queue/pqueue.py +2 -6
  23. crawlo/queue/queue_manager.py +1 -2
  24. crawlo/settings/default_settings.py +15 -30
  25. crawlo/task_manager.py +2 -2
  26. crawlo/templates/project/items.py.tmpl +2 -2
  27. crawlo/templates/project/middlewares.py.tmpl +9 -89
  28. crawlo/templates/project/pipelines.py.tmpl +8 -68
  29. crawlo/templates/project/settings.py.tmpl +51 -65
  30. crawlo/templates/project/settings_distributed.py.tmpl +59 -67
  31. crawlo/templates/project/settings_gentle.py.tmpl +45 -40
  32. crawlo/templates/project/settings_high_performance.py.tmpl +45 -40
  33. crawlo/templates/project/settings_minimal.py.tmpl +37 -26
  34. crawlo/templates/project/settings_simple.py.tmpl +45 -40
  35. crawlo/templates/run.py.tmpl +3 -7
  36. crawlo/tools/__init__.py +0 -11
  37. crawlo/utils/__init__.py +17 -1
  38. crawlo/utils/db_helper.py +220 -319
  39. crawlo/utils/error_handler.py +313 -67
  40. crawlo/utils/fingerprint.py +3 -4
  41. crawlo/utils/misc.py +82 -0
  42. crawlo/utils/request.py +55 -66
  43. crawlo/utils/selector_helper.py +138 -0
  44. crawlo/utils/spider_loader.py +185 -45
  45. crawlo/utils/text_helper.py +95 -0
  46. crawlo-1.4.5.dist-info/METADATA +329 -0
  47. {crawlo-1.4.3.dist-info → crawlo-1.4.5.dist-info}/RECORD +89 -68
  48. tests/bug_check_test.py +251 -0
  49. tests/direct_selector_helper_test.py +97 -0
  50. tests/ofweek_scrapy/ofweek_scrapy/items.py +12 -0
  51. tests/ofweek_scrapy/ofweek_scrapy/middlewares.py +100 -0
  52. tests/ofweek_scrapy/ofweek_scrapy/pipelines.py +13 -0
  53. tests/ofweek_scrapy/ofweek_scrapy/settings.py +85 -0
  54. tests/ofweek_scrapy/ofweek_scrapy/spiders/__init__.py +4 -0
  55. tests/ofweek_scrapy/ofweek_scrapy/spiders/ofweek_spider.py +162 -0
  56. tests/ofweek_scrapy/scrapy.cfg +11 -0
  57. tests/performance_comparison.py +4 -5
  58. tests/simple_crawlo_test.py +1 -2
  59. tests/simple_follow_test.py +39 -0
  60. tests/simple_response_selector_test.py +95 -0
  61. tests/simple_selector_helper_test.py +155 -0
  62. tests/simple_selector_test.py +208 -0
  63. tests/simple_url_test.py +74 -0
  64. tests/test_crawler_process_import.py +39 -0
  65. tests/test_crawler_process_spider_modules.py +48 -0
  66. tests/test_edge_cases.py +7 -5
  67. tests/test_encoding_core.py +57 -0
  68. tests/test_encoding_detection.py +127 -0
  69. tests/test_factory_compatibility.py +197 -0
  70. tests/test_multi_directory.py +68 -0
  71. tests/test_multiple_spider_modules.py +81 -0
  72. tests/test_optimized_selector_naming.py +101 -0
  73. tests/test_priority_behavior.py +18 -18
  74. tests/test_response_follow.py +105 -0
  75. tests/test_response_selector_methods.py +93 -0
  76. tests/test_response_url_methods.py +71 -0
  77. tests/test_response_urljoin.py +87 -0
  78. tests/test_scrapy_style_encoding.py +113 -0
  79. tests/test_selector_helper.py +101 -0
  80. tests/test_selector_optimizations.py +147 -0
  81. tests/test_spider_loader.py +50 -0
  82. tests/test_spider_loader_comprehensive.py +70 -0
  83. tests/test_spider_modules.py +85 -0
  84. tests/test_spiders/__init__.py +1 -0
  85. tests/test_spiders/test_spider.py +10 -0
  86. crawlo/tools/anti_crawler.py +0 -269
  87. crawlo/utils/class_loader.py +0 -26
  88. crawlo/utils/enhanced_error_handler.py +0 -357
  89. crawlo-1.4.3.dist-info/METADATA +0 -190
  90. examples/test_project/__init__.py +0 -7
  91. examples/test_project/run.py +0 -35
  92. examples/test_project/test_project/__init__.py +0 -4
  93. examples/test_project/test_project/items.py +0 -18
  94. examples/test_project/test_project/middlewares.py +0 -119
  95. examples/test_project/test_project/pipelines.py +0 -97
  96. examples/test_project/test_project/settings.py +0 -170
  97. examples/test_project/test_project/spiders/__init__.py +0 -10
  98. examples/test_project/test_project/spiders/of_week_dis.py +0 -144
  99. tests/simple_log_test.py +0 -58
  100. tests/simple_test.py +0 -48
  101. tests/test_framework_logger.py +0 -67
  102. tests/test_framework_startup.py +0 -65
  103. tests/test_mode_change.py +0 -73
  104. {crawlo-1.4.3.dist-info → crawlo-1.4.5.dist-info}/WHEEL +0 -0
  105. {crawlo-1.4.3.dist-info → crawlo-1.4.5.dist-info}/entry_points.txt +0 -0
  106. {crawlo-1.4.3.dist-info → crawlo-1.4.5.dist-info}/top_level.txt +0 -0
  107. /tests/{final_command_test_report.md → ofweek_scrapy/ofweek_scrapy/__init__.py} +0 -0
crawlo-1.4.5.dist-info/METADATA (new file)
@@ -0,0 +1,329 @@
+ Metadata-Version: 2.4
+ Name: crawlo
+ Version: 1.4.5
+ Summary: Crawlo is a high-performance Python crawler framework built on asynchronous IO, with support for distributed crawling.
+ Home-page: https://github.com/crawl-coder/Crawlo.git
+ Author: crawl-coder
+ Author-email: crawlo@qq.com
+ License: MIT
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.6
+ Description-Content-Type: text/markdown
+ Requires-Dist: aiohttp>=3.12.14
+ Requires-Dist: aiomysql>=0.2.0
+ Requires-Dist: aioredis>=2.0.1
+ Requires-Dist: asyncmy>=0.2.10
+ Requires-Dist: cssselect>=1.2.0
+ Requires-Dist: dateparser>=1.2.2
+ Requires-Dist: httpx[http2]>=0.27.0
+ Requires-Dist: curl-cffi>=0.13.0
+ Requires-Dist: lxml>=5.2.1
+ Requires-Dist: motor>=3.7.0
+ Requires-Dist: parsel>=1.9.1
+ Requires-Dist: pydantic>=2.11.7
+ Requires-Dist: pymongo>=4.11
+ Requires-Dist: PyMySQL>=1.1.1
+ Requires-Dist: python-dateutil>=2.9.0.post0
+ Requires-Dist: redis>=6.2.0
+ Requires-Dist: requests>=2.32.4
+ Requires-Dist: six>=1.17.0
+ Requires-Dist: ujson>=5.9.0
+ Requires-Dist: urllib3>=2.5.0
+ Requires-Dist: w3lib>=2.1.2
+ Requires-Dist: rich>=14.1.0
+ Requires-Dist: astor>=0.8.1
+ Requires-Dist: watchdog>=6.0.0
+ Provides-Extra: render
+ Requires-Dist: webdriver-manager>=4.0.0; extra == "render"
+ Requires-Dist: playwright; extra == "render"
+ Requires-Dist: selenium>=3.141.0; extra == "render"
+ Provides-Extra: all
+ Requires-Dist: bitarray>=1.5.3; extra == "all"
+ Requires-Dist: PyExecJS>=1.5.1; extra == "all"
+ Requires-Dist: pymongo>=3.10.1; extra == "all"
+ Requires-Dist: redis-py-cluster>=2.1.0; extra == "all"
+ Requires-Dist: webdriver-manager>=4.0.0; extra == "all"
+ Requires-Dist: playwright; extra == "all"
+ Requires-Dist: selenium>=3.141.0; extra == "all"
+
+ # Crawlo Crawler Framework
+
+ Crawlo is a high-performance, extensible Python crawler framework that supports both standalone and distributed deployment.
+
+ ## Features
+
+ - High-performance asynchronous crawling
+ - Multiple downloaders supported (aiohttp, httpx, curl-cffi)
+ - Built-in data cleaning and validation
+ - Distributed crawling support
+ - Flexible middleware system
+ - Powerful configuration management
+ - Detailed logging and monitoring
+ - Compatible with Windows and Linux
+
+ ## Installation
+
+ ```bash
+ pip install crawlo
+ ```
+
+ Or install from source:
+
+ ```bash
+ git clone git@github.com:crawl-coder/Crawlo.git
+ cd crawlo
+ pip install -r requirements.txt
+ pip install .
+ ```
+
+ ## Quick Start
+
+ ```python
+ from crawlo import Spider
+
+ class MySpider(Spider):
+     name = 'example'
+
+     def parse(self, response):
+         # Parsing logic goes here
+         pass
+
+ # Run the spider:
+ # crawlo run example
+ ```
+
+ ## Response Object Features
+
+ The Crawlo framework enhances the Response object with a number of convenience methods:
+
+ ### URL Handling
+
+ The URL helpers wrapped on the Response object make common URL operations easy, without manually importing functions from `urllib.parse`:
+
+ ```python
+ class MySpider(Spider):
+     def parse(self, response):
+         # 1. Resolve relative URLs against the response URL
+         absolute_url = response.urljoin('/relative/path')
+
+         # 2. Parse URL components
+         parsed = response.urlparse()  # parse the current response URL
+         scheme = parsed.scheme
+         domain = parsed.netloc
+         path = parsed.path
+
+         # 3. Parse query parameters
+         query_params = response.parse_qs()  # query parameters of the current URL
+
+         # 4. Encode query parameters
+         new_query = response.urlencode({'key': 'value', 'name': '测试'})
+
+         # 5. URL-encode / decode strings
+         encoded = response.quote('hello world 你好')
+         decoded = response.unquote(encoded)
+
+         # 6. Strip URL fragments
+         url_without_fragment, fragment = response.urldefrag('http://example.com/path#section')
+
+         yield Request(url=absolute_url, callback=self.parse_detail)
+ ```
+
+ ### Improved Encoding Detection
+
+ Following Scrapy's design, the Crawlo framework refines the Response object's encoding detection, making decoding more accurate and reliable:
+
+ ```python
+ class MySpider(Spider):
+     def parse(self, response):
+         # Automatically detected response encoding
+         encoding = response.encoding
+
+         # Declared encoding (Request encoding > BOM > HTTP headers > HTML meta tag)
+         declared_encoding = response._declared_encoding()
+
+         # The response text is already decoded with the detected encoding
+         text = response.text
+
+         # Work with the decoded content
+         # ...
+ ```
+
+ Encoding detection priority (a sketch illustrating this order follows the list):
+ 1. Encoding specified on the Request
+ 2. BOM (byte order mark)
+ 3. HTTP Content-Type header
+ 4. HTML meta tag declaration
+ 5. Automatic detection from the content
+ 6. Default encoding (utf-8)
+
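+ Below is a minimal, hypothetical sketch of how this resolution order might be expressed, using helpers from `w3lib.encoding` (already one of Crawlo's dependencies). It illustrates the priority chain only and is not Crawlo's actual implementation:
+
+ ```python
+ from w3lib.encoding import http_content_type_encoding, html_body_declared_encoding, read_bom
+
+ def resolve_encoding(request_encoding, headers, body, default='utf-8'):
+     """Return the first encoding that can be determined, following the priority order above."""
+     # 1. Encoding explicitly set on the Request
+     if request_encoding:
+         return request_encoding
+     # 2. BOM at the start of the raw body bytes
+     bom_encoding, _ = read_bom(body)
+     if bom_encoding:
+         return bom_encoding
+     # 3. HTTP Content-Type header
+     header_encoding = http_content_type_encoding(headers.get('Content-Type', ''))
+     if header_encoding:
+         return header_encoding
+     # 4. <meta charset=...> declaration in the HTML body
+     meta_encoding = html_body_declared_encoding(body)
+     if meta_encoding:
+         return meta_encoding
+     # 5./6. Fall back to content sniffing or the default (utf-8)
+     return default
+ ```
+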
+ ### Improved Selector Methods
+
+ The Crawlo framework also streamlines the Response object's selector methods, offering more convenient data extraction with intuitive, consistent naming:
+
+ ```python
+ class MySpider(Spider):
+     def parse(self, response):
+         # 1. Extract the text of a single element (CSS and XPath both supported)
+         title = response.extract_text('title')    # CSS selector
+         title = response.extract_text('//title')  # XPath selector
+
+         # 2. Extract the text of multiple elements
+         paragraphs = response.extract_texts('.content p')                  # CSS selector
+         paragraphs = response.extract_texts('//div[@class="content"]//p')  # XPath selector
+
+         # 3. Extract an attribute from a single element
+         link_href = response.extract_attr('a', 'href')                   # CSS selector
+         link_href = response.extract_attr('//a[@class="link"]', 'href')  # XPath selector
+
+         # 4. Extract an attribute from multiple elements
+         all_links = response.extract_attrs('a', 'href')                   # CSS selector
+         all_links = response.extract_attrs('//a[@class="link"]', 'href')  # XPath selector
+
+         yield {
+             'title': title,
+             'paragraphs': paragraphs,
+             'links': all_links
+         }
+ ```
+
+ All selector methods follow a short, intuitive naming style that is easy to remember and use.
+
+ ### Utility Modules
+
+ The Crawlo framework ships a rich set of utility modules for common tasks. The selector helper functions now live in the `crawlo.utils.selector_helper` module:
+
+ ```python
+ from crawlo.utils import (
+     extract_text,
+     extract_texts,
+     extract_attr,
+     extract_attrs,
+     is_xpath
+ )
+
+ # Use these helpers in your own code
+ title_elements = response.css('title')
+ title_text = extract_text(title_elements)
+
+ li_elements = response.css('.list li')
+ li_texts = extract_texts(li_elements)
+
+ link_elements = response.css('.link')
+ link_href = extract_attr(link_elements, 'href')
+
+ all_links = response.css('a')
+ all_hrefs = extract_attrs(all_links, 'href')
+ ```
+
+ ## Logging System
+
+ Crawlo comes with a capable logging system that supports a variety of configuration options:
+
+ ### Basic Configuration
+
+ ```python
+ from crawlo.logging import configure_logging, get_logger
+
+ # Configure the logging system
+ configure_logging(
+     LOG_LEVEL='INFO',
+     LOG_FILE='logs/app.log',
+     LOG_MAX_BYTES=10*1024*1024,  # 10MB
+     LOG_BACKUP_COUNT=5
+ )
+
+ # Get a logger
+ logger = get_logger('my_module')
+ logger.info('This is a log message')
+ ```
+
+ ### Advanced Configuration
+
+ ```python
+ # Configure console and file log levels separately
+ configure_logging(
+     LOG_LEVEL='INFO',
+     LOG_CONSOLE_LEVEL='WARNING',  # console shows WARNING and above only
+     LOG_FILE_LEVEL='DEBUG',       # file records DEBUG and above
+     LOG_FILE='logs/app.log',
+     LOG_INCLUDE_THREAD_ID=True,   # include the thread ID
+     LOG_INCLUDE_PROCESS_ID=True   # include the process ID
+ )
+
+ # Per-module log levels
+ configure_logging(
+     LOG_LEVEL='WARNING',
+     LOG_LEVELS={
+         'my_module.debug': 'DEBUG',
+         'my_module.info': 'INFO'
+     }
+ )
+ ```
+
+ ### Performance Monitoring
+
+ ```python
+ from crawlo.logging import get_monitor
+
+ # Enable logging performance monitoring
+ monitor = get_monitor()
+ monitor.enable_monitoring()
+
+ # Fetch a performance report
+ report = monitor.get_performance_report()
+ print(report)
+ ```
+
+ ### Log Sampling
+
+ ```python
+ from crawlo.logging import get_sampler
+
+ # Set a sample rate (record only 30% of log messages)
+ sampler = get_sampler()
+ sampler.set_sample_rate('my_module', 0.3)
+
+ # Set a rate limit (at most 100 log messages per second)
+ sampler.set_rate_limit('my_module', 100)
+ ```
+
+ ## Windows Compatibility Notes
+
+ When using log rotation on Windows, you may run into file-locking problems. To work around this, install the `concurrent-log-handler` package:
+
+ ```bash
+ pip install concurrent-log-handler
+ ```
+
+ Crawlo automatically detects this library and uses it for better Windows compatibility (a sketch of this fallback pattern appears below).
+
+ Without `concurrent-log-handler`, you may see the following error on Windows:
+ ```
+ PermissionError: [WinError 32] The process cannot access the file because it is being used by another process.
+ ```
+
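+ A minimal sketch of such a detect-and-fall-back pattern with the standard `logging` module; the exact handler wiring inside Crawlo may differ, so treat this as an illustration only:
+
+ ```python
+ import logging
+ import os
+
+ # Prefer concurrent-log-handler when it is installed (safe on Windows),
+ # otherwise fall back to the standard rotating handler.
+ try:
+     from concurrent_log_handler import ConcurrentRotatingFileHandler as RotatingHandler
+ except ImportError:
+     from logging.handlers import RotatingFileHandler as RotatingHandler
+
+ os.makedirs('logs', exist_ok=True)
+ handler = RotatingHandler('logs/app.log', maxBytes=10 * 1024 * 1024, backupCount=5)
+ handler.setFormatter(logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s'))
+
+ logger = logging.getLogger('my_module')
+ logger.addHandler(handler)
+ logger.setLevel(logging.INFO)
+ logger.info('log rotation works on Windows without file-lock errors')
+ ```
+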
+ ## Spider Auto-Discovery
+
+ The Crawlo framework can automatically discover and load spiders through the `SPIDER_MODULES` setting, much like Scrapy's mechanism:
+
+ ```python
+ # settings.py
+ SPIDER_MODULES = [
+     'myproject.spiders',
+     'myproject.more_spiders',
+ ]
+
+ SPIDER_LOADER_WARN_ONLY = True  # only warn on load errors instead of raising
+ ```
+
+ The framework scans the configured module packages, discovering and registering the spider classes it finds, as sketched below.
+
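+ A minimal sketch of what such discovery can look like, using only the standard library; the walk over `SPIDER_MODULES` and the `Spider`-subclass check are simplified and do not mirror Crawlo's `spider_loader` internals:
+
+ ```python
+ import importlib
+ import inspect
+ import pkgutil
+
+ from crawlo import Spider
+
+ def discover_spiders(spider_modules):
+     """Walk each package listed in SPIDER_MODULES and collect Spider subclasses by name."""
+     spiders = {}
+     for module_path in spider_modules:
+         package = importlib.import_module(module_path)
+         # Walk the package's submodules (e.g. myproject/spiders/*.py)
+         for info in pkgutil.walk_packages(package.__path__, prefix=package.__name__ + '.'):
+             module = importlib.import_module(info.name)
+             for _, obj in inspect.getmembers(module, inspect.isclass):
+                 if issubclass(obj, Spider) and obj is not Spider and getattr(obj, 'name', None):
+                     spiders[obj.name] = obj
+     return spiders
+
+ # Usage: spiders = discover_spiders(['myproject.spiders'])
+ ```
+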
+ ## Documentation
+
+ See the [documentation](https://your-docs-url.com) for more information.
+
+ ## License
+
+ MIT