crawlo-1.1.2-py3-none-any.whl → crawlo-1.1.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crawlo might be problematic.

Files changed (113)
  1. crawlo/__init__.py +34 -34
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +40 -40
  4. crawlo/commands/__init__.py +13 -13
  5. crawlo/commands/check.py +594 -594
  6. crawlo/commands/genspider.py +151 -151
  7. crawlo/commands/list.py +155 -155
  8. crawlo/commands/run.py +285 -285
  9. crawlo/commands/startproject.py +196 -196
  10. crawlo/commands/stats.py +188 -188
  11. crawlo/commands/utils.py +186 -186
  12. crawlo/config.py +279 -279
  13. crawlo/core/__init__.py +2 -2
  14. crawlo/core/engine.py +171 -171
  15. crawlo/core/enhanced_engine.py +189 -189
  16. crawlo/core/processor.py +40 -40
  17. crawlo/core/scheduler.py +166 -162
  18. crawlo/crawler.py +1027 -1027
  19. crawlo/downloader/__init__.py +242 -242
  20. crawlo/downloader/aiohttp_downloader.py +212 -212
  21. crawlo/downloader/cffi_downloader.py +251 -251
  22. crawlo/downloader/httpx_downloader.py +259 -257
  23. crawlo/event.py +11 -11
  24. crawlo/exceptions.py +82 -78
  25. crawlo/extension/__init__.py +31 -31
  26. crawlo/extension/log_interval.py +49 -49
  27. crawlo/extension/log_stats.py +44 -44
  28. crawlo/extension/logging_extension.py +34 -34
  29. crawlo/filters/__init__.py +154 -154
  30. crawlo/filters/aioredis_filter.py +242 -242
  31. crawlo/filters/memory_filter.py +269 -269
  32. crawlo/items/__init__.py +23 -23
  33. crawlo/items/base.py +21 -21
  34. crawlo/items/fields.py +53 -53
  35. crawlo/items/items.py +104 -104
  36. crawlo/middleware/__init__.py +21 -21
  37. crawlo/middleware/default_header.py +32 -32
  38. crawlo/middleware/download_delay.py +28 -28
  39. crawlo/middleware/middleware_manager.py +135 -135
  40. crawlo/middleware/proxy.py +248 -248
  41. crawlo/middleware/request_ignore.py +30 -30
  42. crawlo/middleware/response_code.py +18 -18
  43. crawlo/middleware/response_filter.py +26 -26
  44. crawlo/middleware/retry.py +125 -125
  45. crawlo/mode_manager.py +200 -200
  46. crawlo/network/__init__.py +21 -21
  47. crawlo/network/request.py +311 -311
  48. crawlo/network/response.py +271 -269
  49. crawlo/pipelines/__init__.py +22 -13
  50. crawlo/pipelines/bloom_dedup_pipeline.py +157 -0
  51. crawlo/pipelines/console_pipeline.py +39 -39
  52. crawlo/pipelines/csv_pipeline.py +316 -316
  53. crawlo/pipelines/database_dedup_pipeline.py +225 -0
  54. crawlo/pipelines/json_pipeline.py +218 -218
  55. crawlo/pipelines/memory_dedup_pipeline.py +116 -0
  56. crawlo/pipelines/mongo_pipeline.py +116 -116
  57. crawlo/pipelines/mysql_pipeline.py +195 -195
  58. crawlo/pipelines/pipeline_manager.py +56 -56
  59. crawlo/pipelines/redis_dedup_pipeline.py +163 -0
  60. crawlo/project.py +153 -153
  61. crawlo/queue/pqueue.py +37 -37
  62. crawlo/queue/queue_manager.py +307 -303
  63. crawlo/queue/redis_priority_queue.py +208 -191
  64. crawlo/settings/__init__.py +7 -7
  65. crawlo/settings/default_settings.py +245 -226
  66. crawlo/settings/setting_manager.py +99 -99
  67. crawlo/spider/__init__.py +639 -639
  68. crawlo/stats_collector.py +59 -59
  69. crawlo/subscriber.py +106 -106
  70. crawlo/task_manager.py +30 -30
  71. crawlo/templates/crawlo.cfg.tmpl +10 -10
  72. crawlo/templates/project/__init__.py.tmpl +3 -3
  73. crawlo/templates/project/items.py.tmpl +17 -17
  74. crawlo/templates/project/middlewares.py.tmpl +86 -86
  75. crawlo/templates/project/pipelines.py.tmpl +341 -335
  76. crawlo/templates/project/run.py.tmpl +251 -238
  77. crawlo/templates/project/settings.py.tmpl +250 -247
  78. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  79. crawlo/templates/spider/spider.py.tmpl +177 -177
  80. crawlo/utils/__init__.py +7 -7
  81. crawlo/utils/controlled_spider_mixin.py +439 -335
  82. crawlo/utils/date_tools.py +233 -233
  83. crawlo/utils/db_helper.py +343 -343
  84. crawlo/utils/func_tools.py +82 -82
  85. crawlo/utils/large_scale_config.py +286 -286
  86. crawlo/utils/large_scale_helper.py +343 -343
  87. crawlo/utils/log.py +128 -128
  88. crawlo/utils/queue_helper.py +175 -175
  89. crawlo/utils/request.py +267 -267
  90. crawlo/utils/request_serializer.py +219 -219
  91. crawlo/utils/spider_loader.py +62 -62
  92. crawlo/utils/system.py +11 -11
  93. crawlo/utils/tools.py +4 -4
  94. crawlo/utils/url.py +39 -39
  95. {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/METADATA +635 -567
  96. crawlo-1.1.3.dist-info/RECORD +113 -0
  97. examples/__init__.py +7 -7
  98. examples/controlled_spider_example.py +205 -0
  99. tests/__init__.py +7 -7
  100. tests/test_final_validation.py +153 -153
  101. tests/test_proxy_health_check.py +32 -32
  102. tests/test_proxy_middleware_integration.py +136 -136
  103. tests/test_proxy_providers.py +56 -56
  104. tests/test_proxy_stats.py +19 -19
  105. tests/test_proxy_strategies.py +59 -59
  106. tests/test_redis_config.py +28 -28
  107. tests/test_redis_queue.py +224 -224
  108. tests/test_request_serialization.py +70 -70
  109. tests/test_scheduler.py +241 -241
  110. crawlo-1.1.2.dist-info/RECORD +0 -108
  111. {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/WHEEL +0 -0
  112. {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/entry_points.txt +0 -0
  113. {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/top_level.txt +0 -0
crawlo/core/scheduler.py CHANGED
@@ -1,162 +1,166 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- from typing import Optional, Callable
-
- from crawlo.utils.log import get_logger
- from crawlo.utils.request import set_request
- from crawlo.utils.request_serializer import RequestSerializer
- from crawlo.queue.queue_manager import QueueManager, QueueConfig
- from crawlo.project import load_class, common_call
-
-
- class Scheduler:
-     def __init__(self, crawler, dupe_filter, stats, log_level, priority):
-         self.crawler = crawler
-         self.queue_manager: Optional[QueueManager] = None
-         self.request_serializer = RequestSerializer()  # handles request serialization
-
-         self.logger = get_logger(name=self.__class__.__name__, level=log_level)
-         self.stats = stats
-         self.dupe_filter = dupe_filter
-         self.priority = priority
-
-     @classmethod
-     def create_instance(cls, crawler):
-         filter_cls = load_class(crawler.settings.get('FILTER_CLASS'))
-         o = cls(
-             crawler=crawler,
-             dupe_filter=filter_cls.create_instance(crawler),
-             stats=crawler.stats,
-             log_level=crawler.settings.get('LOG_LEVEL'),
-             priority=crawler.settings.get('DEPTH_PRIORITY')
-         )
-         return o
-
-     async def open(self):
-         """Initialize the scheduler and its queue."""
-         # Build the queue configuration
-         queue_config = QueueConfig.from_settings(self.crawler.settings)
-
-         # Create the queue manager
-         self.queue_manager = QueueManager(queue_config)
-
-         # Initialize the queue
-         success = await self.queue_manager.initialize()
-         if not success:
-             raise RuntimeError("Queue initialization failed")
-
-         # Report queue status
-         status = self.queue_manager.get_status()
-         self.logger.info(f'Queue type: {status["type"]}, health: {status["health"]}')
-         self.logger.info(f'requesting filter: {self.dupe_filter}')
-
-     async def next_request(self):
-         """Fetch the next request."""
-         if not self.queue_manager:
-             return None
-
-         request = await self.queue_manager.get()
-
-         # Restore the callback (needed when the request came from the Redis queue)
-         if request:
-             spider = getattr(self.crawler, 'spider', None)
-             request = self.request_serializer.restore_after_deserialization(request, spider)
-
-         return request
-
-     async def enqueue_request(self, request):
-         """Put a request onto the queue."""
-         if not request.dont_filter and await common_call(self.dupe_filter.requested, request):
-             self.dupe_filter.log_stats(request)
-             return False
-
-         if not self.queue_manager:
-             self.logger.error("Queue manager is not initialized")
-             return False
-
-         set_request(request, self.priority)
-
-         # Use the unified queue interface
-         success = await self.queue_manager.put(request, priority=getattr(request, 'priority', 0))
-
-         if success:
-             self.logger.debug(f"✅ Request enqueued: {request.url}")
-
-         return success
-
-     def idle(self) -> bool:
-         """Check whether the queue is empty."""
-         return len(self) == 0
-
-     async def close(self):
-         """Shut down the scheduler."""
-         if isinstance(closed := getattr(self.dupe_filter, 'closed', None), Callable):
-             await closed()
-
-         if self.queue_manager:
-             await self.queue_manager.close()
-
-     def __len__(self):
-         """Approximate queue size."""
-         if not self.queue_manager:
-             return 0
-         # Synchronous approximation; the real size has to be fetched asynchronously
-         return 0 if self.queue_manager.empty() else 1
-
- # #!/usr/bin/python
- # # -*- coding:UTF-8 -*-
- # from typing import Optional, Callable
- #
- # from crawlo.utils.log import get_logger
- # from crawlo.utils.request import set_request
- # from crawlo.utils.pqueue import SpiderPriorityQueue
- # from crawlo.project import load_class, common_call
- #
- #
- # class Scheduler:
- #     def __init__(self, crawler, dupe_filter, stats, log_level, priority):
- #         self.crawler = crawler
- #         self.request_queue: Optional[SpiderPriorityQueue] = None
- #
- #         self.logger = get_logger(name=self.__class__.__name__, level=log_level)
- #         self.stats = stats
- #         self.dupe_filter = dupe_filter
- #         self.priority = priority
- #
- #     @classmethod
- #     def create_instance(cls, crawler):
- #         filter_cls = load_class(crawler.settings.get('FILTER_CLASS'))
- #         o = cls(
- #             crawler=crawler,
- #             dupe_filter=filter_cls.create_instance(crawler),
- #             stats=crawler.stats,
- #             log_level=crawler.settings.get('LOG_LEVEL'),
- #             priority=crawler.settings.get('DEPTH_PRIORITY')
- #         )
- #         return o
- #
- #     def open(self):
- #         self.request_queue = SpiderPriorityQueue()
- #         self.logger.info(f'requesting filter: {self.dupe_filter}')
- #
- #     async def next_request(self):
- #         request = await self.request_queue.get()
- #         return request
- #
- #     async def enqueue_request(self, request):
- #         if not request.dont_filter and await common_call(self.dupe_filter.requested, request):
- #             self.dupe_filter.log_stats(request)
- #             return False
- #         set_request(request, self.priority)
- #         await self.request_queue.put(request)
- #         return True
- #
- #     def idle(self) -> bool:
- #         return len(self) == 0
- #
- #     async def close(self):
- #         if isinstance(closed := getattr(self.dupe_filter, 'closed', None), Callable):
- #             await closed()
- #
- #     def __len__(self):
- #         return self.request_queue.qsize()
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ from typing import Optional, Callable
+
+ from crawlo.utils.log import get_logger
+ from crawlo.utils.request import set_request
+ from crawlo.utils.request_serializer import RequestSerializer
+ from crawlo.queue.queue_manager import QueueManager, QueueConfig
+ from crawlo.project import load_class, common_call
+
+
+ class Scheduler:
+     def __init__(self, crawler, dupe_filter, stats, log_level, priority):
+         self.crawler = crawler
+         self.queue_manager: Optional[QueueManager] = None
+         self.request_serializer = RequestSerializer()  # handles request serialization
+
+         self.logger = get_logger(name=self.__class__.__name__, level=log_level)
+         self.stats = stats
+         self.dupe_filter = dupe_filter
+         self.priority = priority
+
+     @classmethod
+     def create_instance(cls, crawler):
+         filter_cls = load_class(crawler.settings.get('FILTER_CLASS'))
+         o = cls(
+             crawler=crawler,
+             dupe_filter=filter_cls.create_instance(crawler),
+             stats=crawler.stats,
+             log_level=crawler.settings.get('LOG_LEVEL'),
+             priority=crawler.settings.get('DEPTH_PRIORITY')
+         )
+         return o
+
+     async def open(self):
+         """Initialize the scheduler and its queue."""
+         try:
+             # Build the queue configuration
+             queue_config = QueueConfig.from_settings(self.crawler.settings)
+
+             # Create the queue manager
+             self.queue_manager = QueueManager(queue_config)
+
+             # Initialize the queue
+             success = await self.queue_manager.initialize()
+             if not success:
+                 raise RuntimeError("Queue initialization failed")
+
+             # Report queue status
+             status = self.queue_manager.get_status()
+             self.logger.info(f'Queue type: {status["type"]}, health: {status["health"]}')
+             self.logger.info(f'requesting filter: {self.dupe_filter}')
+         except Exception as e:
+             self.logger.error(f"❌ Scheduler initialization failed: {e}")
+             raise
+
+     async def next_request(self):
+         """Fetch the next request."""
+         if not self.queue_manager:
+             return None
+
+         request = await self.queue_manager.get()
+
+         # Restore the callback (needed when the request came from the Redis queue)
+         if request:
+             spider = getattr(self.crawler, 'spider', None)
+             request = self.request_serializer.restore_after_deserialization(request, spider)
+
+         return request
+
+     async def enqueue_request(self, request):
+         """Put a request onto the queue."""
+         if not request.dont_filter and await common_call(self.dupe_filter.requested, request):
+             self.dupe_filter.log_stats(request)
+             return False
+
+         if not self.queue_manager:
+             self.logger.error("Queue manager is not initialized")
+             return False
+
+         set_request(request, self.priority)
+
+         # Use the unified queue interface
+         success = await self.queue_manager.put(request, priority=getattr(request, 'priority', 0))
+
+         if success:
+             self.logger.debug(f"✅ Request enqueued: {request.url}")
+
+         return success
+
+     def idle(self) -> bool:
+         """Check whether the queue is empty."""
+         return len(self) == 0
+
+     async def close(self):
+         """Shut down the scheduler."""
+         if isinstance(closed := getattr(self.dupe_filter, 'closed', None), Callable):
+             await closed()
+
+         if self.queue_manager:
+             await self.queue_manager.close()
+
+     def __len__(self):
+         """Approximate queue size."""
+         if not self.queue_manager:
+             return 0
+         # Synchronous approximation; the real size has to be fetched asynchronously
+         return 0 if self.queue_manager.empty() else 1
+
+ # #!/usr/bin/python
+ # # -*- coding:UTF-8 -*-
+ # from typing import Optional, Callable
+ #
+ # from crawlo.utils.log import get_logger
+ # from crawlo.utils.request import set_request
+ # from crawlo.utils.pqueue import SpiderPriorityQueue
+ # from crawlo.project import load_class, common_call
+ #
+ #
+ # class Scheduler:
+ #     def __init__(self, crawler, dupe_filter, stats, log_level, priority):
+ #         self.crawler = crawler
+ #         self.request_queue: Optional[SpiderPriorityQueue] = None
+ #
+ #         self.logger = get_logger(name=self.__class__.__name__, level=log_level)
+ #         self.stats = stats
+ #         self.dupe_filter = dupe_filter
+ #         self.priority = priority
+ #
+ #     @classmethod
+ #     def create_instance(cls, crawler):
+ #         filter_cls = load_class(crawler.settings.get('FILTER_CLASS'))
+ #         o = cls(
+ #             crawler=crawler,
+ #             dupe_filter=filter_cls.create_instance(crawler),
+ #             stats=crawler.stats,
+ #             log_level=crawler.settings.get('LOG_LEVEL'),
+ #             priority=crawler.settings.get('DEPTH_PRIORITY')
+ #         )
+ #         return o
+ #
+ #     def open(self):
+ #         self.request_queue = SpiderPriorityQueue()
+ #         self.logger.info(f'requesting filter: {self.dupe_filter}')
+ #
+ #     async def next_request(self):
+ #         request = await self.request_queue.get()
+ #         return request
+ #
+ #     async def enqueue_request(self, request):
+ #         if not request.dont_filter and await common_call(self.dupe_filter.requested, request):
+ #             self.dupe_filter.log_stats(request)
+ #             return False
+ #         set_request(request, self.priority)
+ #         await self.request_queue.put(request)
+ #         return True
+ #
+ #     def idle(self) -> bool:
+ #         return len(self) == 0
+ #
+ #     async def close(self):
+ #         if isinstance(closed := getattr(self.dupe_filter, 'closed', None), Callable):
+ #             await closed()
+ #
+ #     def __len__(self):
+ #         return self.request_queue.qsize()
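
The only behavioral change in this file is that open() now wraps queue setup in a try/except that logs the failure and re-raises it, so a broken queue backend aborts startup instead of leaving the scheduler half-initialized. Below is a minimal, self-contained sketch of that fail-fast pattern; the stub classes and names (FailingQueueManager, SchedulerSketch) are illustrative only and are not part of crawlo's API.

import asyncio
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("scheduler-sketch")


class FailingQueueManager:
    """Illustrative stand-in for a queue manager whose initialize() reports failure."""

    async def initialize(self) -> bool:
        return False

    async def close(self) -> None:
        pass


class SchedulerSketch:
    """Mirrors the 1.1.3 open() pattern: fail fast, log the error, re-raise."""

    def __init__(self) -> None:
        self.queue_manager = None

    async def open(self) -> None:
        try:
            self.queue_manager = FailingQueueManager()
            if not await self.queue_manager.initialize():
                raise RuntimeError("Queue initialization failed")
        except Exception as exc:
            logger.error("Scheduler initialization failed: %s", exc)
            raise  # propagate so the caller can abort instead of crawling without a queue


async def main() -> None:
    scheduler = SchedulerSketch()
    try:
        await scheduler.open()
    except RuntimeError:
        logger.info("An engine would stop the crawl here rather than continue without a queue")


if __name__ == "__main__":
    asyncio.run(main())

Re-raising after logging keeps the root cause visible in the log while ensuring the caller cannot proceed with an unusable scheduler.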