crawlo 1.0.6__py3-none-any.whl → 1.0.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



crawlo/crawler.py CHANGED
@@ -1,19 +1,18 @@
  #!/usr/bin/python
  # -*- coding: UTF-8 -*-
+ from __future__ import annotations
  import asyncio
  import signal
- from typing import Type, Optional, Set, List
-
- from crawlo.spider import Spider
- from crawlo.core.engine import Engine
- from crawlo.utils.log import get_logger
- from crawlo.subscriber import Subscriber
- from crawlo.extension import ExtensionManager
- from crawlo.exceptions import SpiderTypeError
- from crawlo.stats_collector import StatsCollector
- from crawlo.event import spider_opened, spider_closed
- from crawlo.settings.setting_manager import SettingManager
- from crawlo.utils.project import merge_settings, get_settings
+ from typing import Type, Optional, Set, List, Union, Dict
+ from .spider import Spider, get_global_spider_registry
+ from .core.engine import Engine
+ from .utils.log import get_logger
+ from .subscriber import Subscriber
+ from .extension import ExtensionManager
+ from .stats_collector import StatsCollector
+ from .event import spider_opened, spider_closed
+ from .settings.setting_manager import SettingManager
+ from .utils.project import merge_settings, get_settings


  logger = get_logger(__name__)
@@ -30,7 +29,7 @@ class Crawler:
          self.subscriber: Optional[Subscriber] = None
          self.extension: Optional[ExtensionManager] = None
          self.settings: SettingManager = settings.copy()
-         self._closed = False  # newly added state flag
+         self._closed = False
          self._close_lock = asyncio.Lock()

      async def crawl(self):
@@ -40,7 +39,6 @@ class Crawler:
          self.engine = self._create_engine()
          self.stats = self._create_stats()
          self.extension = self._create_extension()
-
          await self.engine.start_spider(self.spider)

      @staticmethod
@@ -50,7 +48,6 @@ class Crawler:
      def _create_spider(self) -> Spider:
          spider = self.spider_cls.create_instance(self)

-         # --- critical attribute checks ---
          if not getattr(spider, 'name', None):
              raise AttributeError(f"Spider class '{self.spider_cls.__name__}' must define a 'name' attribute.")

@@ -92,19 +89,36 @@ class Crawler:
              await self.subscriber.notify(spider_closed)
              if self.stats and self.spider:
                  self.stats.close_spider(spider=self.spider, reason=reason)
+                 from crawlo.commands.stats import record_stats
+                 record_stats(self)


  class CrawlerProcess:
      """
-     Crawler process manager: concurrent scheduling of multiple spiders, semaphore limits, live logging, and graceful shutdown
+     Crawler process manager. Supports:
+     - auto-discovery of spider modules
+     - starting spiders by name or by class
+     - concurrency control
+     - graceful shutdown
      """

-     def __init__(self, settings: Optional[SettingManager] = None, max_concurrency: Optional[int] = None):
+     def __init__(
+         self,
+         settings: Optional[SettingManager] = None,
+         max_concurrency: Optional[int] = None,
+         spider_modules: Optional[List[str]] = None
+     ):
          self.settings: SettingManager = settings or self._get_default_settings()
          self.crawlers: Set[Crawler] = set()
          self._active_tasks: Set[asyncio.Task] = set()

-         # Use the dedicated setting, falling back to CONCURRENCY
+         # Auto-discover and import spider modules
+         if spider_modules:
+             self.auto_discover(spider_modules)
+
+         # Snapshot of the global registry (so later imports cannot change it)
+         self._spider_registry: Dict[str, Type[Spider]] = get_global_spider_registry()
+
          self.max_concurrency: int = (
              max_concurrency
              or self.settings.get('MAX_RUNNING_SPIDERS')
@@ -117,80 +131,120 @@ class CrawlerProcess:
          signal.signal(signal.SIGTERM, self._shutdown)
          logger.info(f"CrawlerProcess initialised; max parallel spiders: {self.max_concurrency}")

-     async def crawl(self, spiders):
-         """
-         Run one or more spiders with streamed scheduling and live progress feedback
-         """
-         spider_classes = self._normalize_spiders(spiders)
-         total = len(spider_classes)
+     def auto_discover(self, modules: List[str]):
+         """Import the given modules to trigger Spider class definition and registration."""
+         import importlib
+         import pkgutil
+         for module_name in modules:
+             try:
+                 module = importlib.import_module(module_name)
+                 if hasattr(module, '__path__'):
+                     for _, name, _ in pkgutil.walk_packages(module.__path__, module.__name__ + "."):
+                         importlib.import_module(name)
+                 else:
+                     importlib.import_module(module_name)
+                 logger.debug(f"Scanned module: {module_name}")
+             except Exception as e:
+                 logger.error(f"Failed to scan module {module_name}: {e}", exc_info=True)
+
+     # === Public read-only accessors: avoid touching _spider_registry directly ===
+
+     def get_spider_names(self) -> List[str]:
+         """Return the names of all registered spiders."""
+         return list(self._spider_registry.keys())
+
+     def get_spider_class(self, name: str) -> Optional[Type[Spider]]:
+         """Look up a spider class by name."""
+         return self._spider_registry.get(name)
+
+     def is_spider_registered(self, name: str) -> bool:
+         """Check whether a name is registered."""
+         return name in self._spider_registry
+
+     async def crawl(self, spiders: Union[Type[Spider], str, List[Union[Type[Spider], str]]]):
+         """Run one or more spiders."""
+         spider_classes_to_run = self._resolve_spiders_to_run(spiders)
+         total = len(spider_classes_to_run)

          if total == 0:
-             raise ValueError("At least one spider class must be provided")
-
-         # Sort by name
-         spider_classes.sort(key=lambda cls: cls.__name__.lower())
+             raise ValueError("At least one spider class or name must be provided")

+         # Sort by class name so the start order is predictable
+         spider_classes_to_run.sort(key=lambda cls: cls.__name__.lower())
          logger.info(f"Starting {total} spider(s).")

-         # Stream-start all spider tasks
+         # Streamed start-up
          tasks = [
              asyncio.create_task(self._run_spider_with_limit(spider_cls, index + 1, total))
-             for index, spider_cls in enumerate(spider_classes)
+             for index, spider_cls in enumerate(spider_classes_to_run)
          ]

-         # Wait for all tasks (a failure does not abort the rest)
+         # Wait for completion (a failure does not abort the rest)
          results = await asyncio.gather(*tasks, return_exceptions=True)
-
-         # Tally the exceptions
          failed = [i for i, r in enumerate(results) if isinstance(r, Exception)]
          if failed:
-             logger.error(f"{len(failed)} spider(s) raised exceptions: {[spider_classes[i].__name__ for i in failed]}")
-
-     @staticmethod
-     def _normalize_spiders(spiders) -> List[Type[Spider]]:
-         """Normalise the input into a list of spider classes"""
-         if isinstance(spiders, type) and issubclass(spiders, Spider):
-             return [spiders]
-         elif isinstance(spiders, (list, tuple)):
-             return list(spiders)
+             logger.error(f"{len(failed)} spider(s) raised exceptions: {[spider_classes_to_run[i].__name__ for i in failed]}")
+
+     def _resolve_spiders_to_run(
+         self,
+         spiders_input: Union[Type[Spider], str, List[Union[Type[Spider], str]]]
+     ) -> List[Type[Spider]]:
+         """Resolve the input into a list of spider classes."""
+         inputs = self._normalize_inputs(spiders_input)
+         seen_spider_names: Set[str] = set()
+         spider_classes: List[Type[Spider]] = []
+
+         for item in inputs:
+             spider_cls = self._resolve_spider_class(item)
+             spider_name = spider_cls.name
+
+             if spider_name in seen_spider_names:
+                 raise ValueError(f"Spider name '{spider_name}' appears more than once in this run.")
+
+             seen_spider_names.add(spider_name)
+             spider_classes.append(spider_cls)
+
+         return spider_classes
+
+     def _normalize_inputs(self, spiders_input) -> List[Union[Type[Spider], str]]:
+         """Normalise the input into a list."""
+         if isinstance(spiders_input, (type, str)):
+             return [spiders_input]
+         elif isinstance(spiders_input, (list, tuple)):
+             return list(spiders_input)
          else:
-             raise TypeError("spiders must be a spider class or a list/tuple of spider classes")
+             raise TypeError("spiders must be a spider class, a name string, or a list/tuple of them")
+
+     def _resolve_spider_class(self, item: Union[Type[Spider], str]) -> Type[Spider]:
+         """Resolve a single input item into a spider class."""
+         if isinstance(item, type) and issubclass(item, Spider):
+             return item
+         elif isinstance(item, str):
+             spider_cls = self._spider_registry.get(item)
+             if not spider_cls:
+                 raise ValueError(f"No spider named '{item}' was found.")
+             return spider_cls
+         else:
+             raise TypeError(f"Invalid type {type(item)}. Must be a Spider class or a name string.")

      async def _run_spider_with_limit(self, spider_cls: Type[Spider], seq: int, total: int):
-         """
-         Run a spider under the semaphore limit, with progress logging
-         """
+         """Run a spider under the semaphore limit."""
          task = asyncio.current_task()
          self._active_tasks.add(task)
-
          try:
-             # Acquire a concurrency permit
              await self.semaphore.acquire()
-
-             start_msg = f"[{seq}/{total}] Starting spider: {spider_cls.__name__}"
-             logger.info(start_msg)
-
-             # Create and run the crawler
-             crawler = self._create_crawler(spider_cls)
+             logger.info(f"[{seq}/{total}] Starting spider: {spider_cls.__name__}")
+             crawler = Crawler(spider_cls, self.settings)
              self.crawlers.add(crawler)
              await crawler.crawl()
-
-             end_msg = f"[{seq}/{total}] Spider finished: {spider_cls.__name__}"
-             logger.info(end_msg)
-
+             logger.info(f"[{seq}/{total}] Spider finished: {spider_cls.__name__}")
          except Exception as e:
              logger.error(f"Spider {spider_cls.__name__} failed: {e}", exc_info=True)
              raise
          finally:
              if task in self._active_tasks:
                  self._active_tasks.remove(task)
-             self.semaphore.release()  # must always be released
-
-     def _create_crawler(self, spider_cls: Type[Spider]) -> Crawler:
-         """Create a crawler instance"""
-         if isinstance(spider_cls, str):
-             raise SpiderTypeError(f"String-form spiders are not supported: {spider_cls}")
-         return Crawler(spider_cls, self.settings)
+             self.semaphore.release()

      def _shutdown(self, _signum, _frame):
          """Graceful shutdown signal handler"""
@@ -216,4 +270,224 @@ class CrawlerProcess:
              return get_settings()
          except Exception as e:
              logger.warning(f"Failed to load default settings: {e}")
-         return SettingManager()
+         return SettingManager()
+
275
+ # #!/usr/bin/python
276
+ # # -*- coding: UTF-8 -*-
277
+ # import asyncio
278
+ # import signal
279
+ # from typing import Type, Optional, Set, List
280
+ #
281
+ # from crawlo.spider import Spider
282
+ # from crawlo.core.engine import Engine
283
+ # from crawlo.utils.log import get_logger
284
+ # from crawlo.subscriber import Subscriber
285
+ # from crawlo.extension import ExtensionManager
286
+ # from crawlo.exceptions import SpiderTypeError
287
+ # from crawlo.stats_collector import StatsCollector
288
+ # from crawlo.event import spider_opened, spider_closed
289
+ # from crawlo.settings.setting_manager import SettingManager
290
+ # from crawlo.utils.project import merge_settings, get_settings
291
+ #
292
+ #
293
+ # logger = get_logger(__name__)
294
+ #
295
+ #
296
+ # class Crawler:
297
+ # """单个爬虫运行实例,绑定 Spider 与引擎"""
298
+ #
299
+ # def __init__(self, spider_cls: Type[Spider], settings: SettingManager):
300
+ # self.spider_cls = spider_cls
301
+ # self.spider: Optional[Spider] = None
302
+ # self.engine: Optional[Engine] = None
303
+ # self.stats: Optional[StatsCollector] = None
304
+ # self.subscriber: Optional[Subscriber] = None
305
+ # self.extension: Optional[ExtensionManager] = None
306
+ # self.settings: SettingManager = settings.copy()
307
+ # self._closed = False # 新增状态
308
+ # self._close_lock = asyncio.Lock()
309
+ #
310
+ # async def crawl(self):
311
+ # """启动爬虫核心流程"""
312
+ # self.subscriber = self._create_subscriber()
313
+ # self.spider = self._create_spider()
314
+ # self.engine = self._create_engine()
315
+ # self.stats = self._create_stats()
316
+ # self.extension = self._create_extension()
317
+ #
318
+ # await self.engine.start_spider(self.spider)
319
+ #
320
+ # @staticmethod
321
+ # def _create_subscriber() -> Subscriber:
322
+ # return Subscriber()
323
+ #
324
+ # def _create_spider(self) -> Spider:
325
+ # spider = self.spider_cls.create_instance(self)
326
+ #
327
+ # # --- 关键属性检查 ---
328
+ # if not getattr(spider, 'name', None):
329
+ # raise AttributeError(f"爬虫类 '{self.spider_cls.__name__}' 必须定义 'name' 属性。")
330
+ #
331
+ # if not callable(getattr(spider, 'start_requests', None)):
332
+ # raise AttributeError(f"爬虫 '{spider.name}' 必须实现可调用的 'start_requests' 方法。")
333
+ #
334
+ # start_urls = getattr(spider, 'start_urls', [])
335
+ # if isinstance(start_urls, str):
336
+ # raise TypeError(f"爬虫 '{spider.name}' 的 'start_urls' 必须是列表或元组,不能是字符串。")
337
+ #
338
+ # if not callable(getattr(spider, 'parse', None)):
339
+ # logger.warning(
340
+ # f"爬虫 '{spider.name}' 未定义 'parse' 方法。请确保所有 Request 都指定了回调函数,否则响应将被忽略。")
341
+ #
342
+ # self._set_spider(spider)
343
+ # return spider
344
+ #
345
+ # def _create_engine(self) -> Engine:
346
+ # engine = Engine(self)
347
+ # engine.engine_start()
348
+ # return engine
349
+ #
350
+ # def _create_stats(self) -> StatsCollector:
351
+ # return StatsCollector(self)
352
+ #
353
+ # def _create_extension(self) -> ExtensionManager:
354
+ # return ExtensionManager.create_instance(self)
355
+ #
356
+ # def _set_spider(self, spider: Spider):
357
+ # self.subscriber.subscribe(spider.spider_opened, event=spider_opened)
358
+ # self.subscriber.subscribe(spider.spider_closed, event=spider_closed)
359
+ # merge_settings(spider, self.settings)
360
+ #
361
+ # async def close(self, reason='finished') -> None:
362
+ # async with self._close_lock:
363
+ # if self._closed:
364
+ # return
365
+ # self._closed = True
366
+ # await self.subscriber.notify(spider_closed)
367
+ # if self.stats and self.spider:
368
+ # self.stats.close_spider(spider=self.spider, reason=reason)
369
+ #
370
+ #
371
+ # class CrawlerProcess:
372
+ # """
373
+ # 爬虫进程管理器,支持多爬虫并发调度、信号量控制、实时日志与优雅关闭
374
+ # """
375
+ #
376
+ # def __init__(self, settings: Optional[SettingManager] = None, max_concurrency: Optional[int] = None):
377
+ # self.settings: SettingManager = settings or self._get_default_settings()
378
+ # self.crawlers: Set[Crawler] = set()
379
+ # self._active_tasks: Set[asyncio.Task] = set()
380
+ #
381
+ # # 使用专用配置,降级使用 CONCURRENCY
382
+ # self.max_concurrency: int = (
383
+ # max_concurrency
384
+ # or self.settings.get('MAX_RUNNING_SPIDERS')
385
+ # or self.settings.get('CONCURRENCY', 3)
386
+ # )
387
+ # self.semaphore = asyncio.Semaphore(self.max_concurrency)
388
+ #
389
+ # # 注册信号量
390
+ # signal.signal(signal.SIGINT, self._shutdown)
391
+ # signal.signal(signal.SIGTERM, self._shutdown)
392
+ # logger.info(f"CrawlerProcess 初始化完成,最大并行爬虫数: {self.max_concurrency}")
393
+ #
394
+ # async def crawl(self, spiders):
395
+ # """
396
+ # 启动一个或多个爬虫,流式调度,支持实时进度反馈
397
+ # """
398
+ # spider_classes = self._normalize_spiders(spiders)
399
+ # total = len(spider_classes)
400
+ #
401
+ # if total == 0:
402
+ # raise ValueError("至少需要提供一个爬虫类")
403
+ #
404
+ # # 按名称排序
405
+ # spider_classes.sort(key=lambda cls: cls.__name__.lower())
406
+ #
407
+ # logger.info(f"启动 {total} 个爬虫.")
408
+ #
409
+ # # 流式启动所有爬虫任务
410
+ # tasks = [
411
+ # asyncio.create_task(self._run_spider_with_limit(spider_cls, index + 1, total))
412
+ # for index, spider_cls in enumerate(spider_classes)
413
+ # ]
414
+ #
415
+ # # 等待所有任务完成(失败不中断)
416
+ # results = await asyncio.gather(*tasks, return_exceptions=True)
417
+ #
418
+ # # 统计异常
419
+ # failed = [i for i, r in enumerate(results) if isinstance(r, Exception)]
420
+ # if failed:
421
+ # logger.error(f"共 {len(failed)} 个爬虫执行异常: {[spider_classes[i].__name__ for i in failed]}")
422
+ #
423
+ # @staticmethod
424
+ # def _normalize_spiders(spiders) -> List[Type[Spider]]:
425
+ # """标准化输入为爬虫类列表"""
426
+ # if isinstance(spiders, type) and issubclass(spiders, Spider):
427
+ # return [spiders]
428
+ # elif isinstance(spiders, (list, tuple)):
429
+ # return list(spiders)
430
+ # else:
431
+ # raise TypeError("spiders 必须是爬虫类或爬虫类列表/元组")
432
+ #
433
+ # async def _run_spider_with_limit(self, spider_cls: Type[Spider], seq: int, total: int):
434
+ # """
435
+ # 受信号量限制的爬虫运行函数,带进度日志
436
+ # """
437
+ # task = asyncio.current_task()
438
+ # self._active_tasks.add(task)
439
+ #
440
+ # try:
441
+ # # 获取并发许可
442
+ # await self.semaphore.acquire()
443
+ #
444
+ # start_msg = f"[{seq}/{total}] 启动爬虫: {spider_cls.__name__}"
445
+ # logger.info(start_msg)
446
+ #
447
+ # # 创建并运行爬虫
448
+ # crawler = self._create_crawler(spider_cls)
449
+ # self.crawlers.add(crawler)
450
+ # await crawler.crawl()
451
+ #
452
+ # end_msg = f"[{seq}/{total}] 爬虫完成: {spider_cls.__name__}"
453
+ # logger.info(end_msg)
454
+ #
455
+ # except Exception as e:
456
+ # logger.error(f"爬虫 {spider_cls.__name__} 执行失败: {e}", exc_info=True)
457
+ # raise
458
+ # finally:
459
+ # if task in self._active_tasks:
460
+ # self._active_tasks.remove(task)
461
+ # self.semaphore.release() # 必须释放
462
+ #
463
+ # def _create_crawler(self, spider_cls: Type[Spider]) -> Crawler:
464
+ # """创建爬虫实例"""
465
+ # if isinstance(spider_cls, str):
466
+ # raise SpiderTypeError(f"不支持字符串形式的爬虫: {spider_cls}")
467
+ # return Crawler(spider_cls, self.settings)
468
+ #
469
+ # def _shutdown(self, _signum, _frame):
470
+ # """优雅关闭信号处理"""
471
+ # logger.warning("收到关闭信号,正在停止所有爬虫...")
472
+ # for crawler in list(self.crawlers):
473
+ # if crawler.engine:
474
+ # crawler.engine.running = False
475
+ # crawler.engine.normal = False
476
+ # asyncio.create_task(self._wait_for_shutdown())
477
+ #
478
+ # async def _wait_for_shutdown(self):
479
+ # """等待所有活跃任务完成"""
480
+ # pending = [t for t in self._active_tasks if not t.done()]
481
+ # if pending:
482
+ # logger.info(f"等待 {len(pending)} 个活跃任务完成...")
483
+ # await asyncio.gather(*pending, return_exceptions=True)
484
+ # logger.info("所有爬虫已优雅关闭")
485
+ #
486
+ # @classmethod
487
+ # def _get_default_settings(cls) -> SettingManager:
488
+ # """加载默认配置"""
489
+ # try:
490
+ # return get_settings()
491
+ # except Exception as e:
492
+ # logger.warning(f"无法加载默认配置: {e}")
493
+ # return SettingManager()
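
Taken together, the crawler.py changes replace the classes-only crawl() API with name-or-class resolution backed by the global spider registry. A minimal usage sketch of the new entry point (the package path myproject.spiders and the spider names news/prices are hypothetical, not part of crawlo):

```python
# Sketch only: assumes a project package "myproject.spiders" whose modules
# define Spider subclasses with name = "news" and name = "prices".
import asyncio

from crawlo.crawler import CrawlerProcess


async def main():
    # Importing the modules triggers SpiderMeta, which registers every
    # Spider subclass globally; CrawlerProcess then snapshots the registry.
    process = CrawlerProcess(spider_modules=["myproject.spiders"])
    print(process.get_spider_names())  # e.g. ['news', 'prices']

    # crawl() now accepts a class, a registered name, or a mixed list;
    # a duplicate name within one run raises ValueError.
    await process.crawl(["news", "prices"])


asyncio.run(main())
```

Note that CrawlerProcess snapshots the registry in __init__, so spiders imported after the process is constructed will not resolve by name.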
crawlo/items/__init__.py CHANGED
@@ -5,9 +5,10 @@ crawlo.items package
  ===============
  Provides the Item and Field classes for data definition and validation.
  """
- from .fields import Field
  from .items import Item
+ from .fields import Field
  from .base import ItemMeta
+
  from crawlo.exceptions import ItemInitError, ItemAttributeError

  __all__ = [
crawlo/items/base.py CHANGED
@@ -4,28 +4,19 @@
  Base metaclass definitions
  """
  from abc import ABCMeta
-
- from crawlo.items import Field
-
+ from .fields import Field

  class ItemMeta(ABCMeta):
-     """
-     Metaclass that automatically collects the Field definitions on an Item class
-     """
-
      def __new__(mcs, name, bases, attrs):
          fields = {}
          cls_attrs = {}

-         # Collect every Field instance
          for attr_name, attr_value in attrs.items():
              if isinstance(attr_value, Field):
                  fields[attr_name] = attr_value
              else:
                  cls_attrs[attr_name] = attr_value

-         # Create the class object
          cls_instance = super().__new__(mcs, name, bases, cls_attrs)
          cls_instance.FIELDS = fields
-
          return cls_instance
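
The net effect of ItemMeta (and of base.py now importing Field from its sibling module rather than back through the package, which removes the import cycle): Field declarations are collected into a FIELDS mapping at class-creation time and stripped from the class body. A minimal sketch, assuming Item is built on ItemMeta as the package exports suggest (ProductItem is illustrative):

```python
from crawlo.items import Item, Field


class ProductItem(Item):
    title = Field()
    price = Field()


# ItemMeta intercepted class creation: the Field instances were moved out of
# the class body and into the FIELDS dict.
print(ProductItem.FIELDS)             # {'title': <Field ...>, 'price': <Field ...>}
print('title' in ProductItem.FIELDS)  # True
```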
crawlo/spider/__init__.py CHANGED
@@ -1,11 +1,44 @@
  #!/usr/bin/python
  # -*- coding:UTF-8 -*-
+ from __future__ import annotations
+ from typing import Type, Any, Optional, List, Dict
  from ..network.request import Request
  from ..utils.log import get_logger


- class Spider(object):
-     name = None
+ # Global spider registry
+ _DEFAULT_SPIDER_REGISTRY: dict[str, Type[Spider]] = {}
+
+
+ class SpiderMeta(type):
+     def __new__(mcs, name: str, bases: tuple[type], namespace: dict[str, Any], **kwargs):
+         cls = super().__new__(mcs, name, bases, namespace)
+
+         is_spider_subclass = any(
+             base is Spider or (isinstance(base, type) and issubclass(base, Spider))
+             for base in bases
+         )
+         if not is_spider_subclass:
+             return cls
+
+         spider_name = namespace.get('name')
+         if not isinstance(spider_name, str):
+             raise AttributeError(f"Spider class '{cls.__name__}' must define a string 'name' attribute.")
+
+         if spider_name in _DEFAULT_SPIDER_REGISTRY:
+             raise ValueError(
+                 f"Spider name '{spider_name}' is already taken by {_DEFAULT_SPIDER_REGISTRY[spider_name].__name__}. "
+                 f"Make sure every spider's name attribute is globally unique."
+             )
+
+         _DEFAULT_SPIDER_REGISTRY[spider_name] = cls
+         get_logger(__name__).debug(f"Auto-registered spider: {spider_name} -> {cls.__name__}")
+
+         return cls
+
+
+ class Spider(metaclass=SpiderMeta):
+     name: str = None

      def __init__(self, name=None, **kwargs):
          if not hasattr(self, 'start_urls'):
@@ -15,7 +48,7 @@ class Spider(object):
          self.logger = get_logger(self.name or self.__class__.__name__)

      @classmethod
-     def create_instance(cls, crawler):
+     def create_instance(cls, crawler) -> Spider:
          o = cls()
          o.crawler = crawler
          return o
@@ -39,3 +72,58 @@ class Spider(object):

      def __str__(self):
          return self.__class__.__name__
+
+
+ # === Public read-only accessors ===
+ def get_global_spider_registry() -> dict[str, Type[Spider]]:
+     return _DEFAULT_SPIDER_REGISTRY.copy()
+
+
+ def get_spider_by_name(name: str) -> Optional[Type[Spider]]:
+     return _DEFAULT_SPIDER_REGISTRY.get(name)
+
+
+ def get_all_spider_classes() -> list[Type[Spider]]:
+     return list(set(_DEFAULT_SPIDER_REGISTRY.values()))
+
+ # ... (the rest of the hunk appends the entire pre-1.0.8 spider/__init__.py, commented out verbatim) ...
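
With SpiderMeta in place, registration is a side effect of class definition, and duplicate names fail at import time instead of at run time. A small sketch of the new behaviour (QuotesSpider is illustrative):

```python
from crawlo.spider import Spider, get_spider_by_name


class QuotesSpider(Spider):
    name = "quotes"  # SpiderMeta registers the class the moment it is defined

    def parse(self, response):
        pass


assert get_spider_by_name("quotes") is QuotesSpider

# Defining a second spider with the same name would fail immediately:
#
# class QuotesCopy(Spider):
#     name = "quotes"   # ValueError: name already taken by QuotesSpider
```

Since get_global_spider_registry() returns a copy rather than the live dict, callers such as CrawlerProcess can only treat their snapshot as read-only, which is exactly how the new crawler.py uses it.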