crawlo 1.1.0__py3-none-any.whl → 1.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (111)
  1. crawlo/__init__.py +33 -24
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +40 -40
  4. crawlo/commands/__init__.py +13 -13
  5. crawlo/commands/check.py +594 -155
  6. crawlo/commands/genspider.py +125 -110
  7. crawlo/commands/list.py +147 -119
  8. crawlo/commands/run.py +285 -170
  9. crawlo/commands/startproject.py +111 -101
  10. crawlo/commands/stats.py +188 -167
  11. crawlo/core/__init__.py +2 -2
  12. crawlo/core/engine.py +158 -158
  13. crawlo/core/processor.py +40 -40
  14. crawlo/core/scheduler.py +57 -57
  15. crawlo/crawler.py +494 -492
  16. crawlo/downloader/__init__.py +78 -78
  17. crawlo/downloader/aiohttp_downloader.py +199 -199
  18. crawlo/downloader/cffi_downloader.py +242 -277
  19. crawlo/downloader/httpx_downloader.py +246 -246
  20. crawlo/event.py +11 -11
  21. crawlo/exceptions.py +78 -78
  22. crawlo/extension/__init__.py +31 -31
  23. crawlo/extension/log_interval.py +49 -49
  24. crawlo/extension/log_stats.py +44 -44
  25. crawlo/extension/logging_extension.py +34 -34
  26. crawlo/filters/__init__.py +37 -37
  27. crawlo/filters/aioredis_filter.py +150 -150
  28. crawlo/filters/memory_filter.py +202 -202
  29. crawlo/items/__init__.py +23 -23
  30. crawlo/items/base.py +21 -21
  31. crawlo/items/fields.py +53 -53
  32. crawlo/items/items.py +104 -104
  33. crawlo/middleware/__init__.py +21 -21
  34. crawlo/middleware/default_header.py +32 -32
  35. crawlo/middleware/download_delay.py +28 -28
  36. crawlo/middleware/middleware_manager.py +135 -135
  37. crawlo/middleware/proxy.py +245 -245
  38. crawlo/middleware/request_ignore.py +30 -30
  39. crawlo/middleware/response_code.py +18 -18
  40. crawlo/middleware/response_filter.py +26 -26
  41. crawlo/middleware/retry.py +90 -90
  42. crawlo/network/__init__.py +7 -7
  43. crawlo/network/request.py +203 -203
  44. crawlo/network/response.py +166 -166
  45. crawlo/pipelines/__init__.py +13 -13
  46. crawlo/pipelines/console_pipeline.py +39 -39
  47. crawlo/pipelines/mongo_pipeline.py +116 -116
  48. crawlo/pipelines/mysql_batch_pipline.py +272 -272
  49. crawlo/pipelines/mysql_pipeline.py +195 -195
  50. crawlo/pipelines/pipeline_manager.py +56 -56
  51. crawlo/project.py +153 -0
  52. crawlo/settings/__init__.py +7 -7
  53. crawlo/settings/default_settings.py +166 -168
  54. crawlo/settings/setting_manager.py +99 -99
  55. crawlo/spider/__init__.py +129 -129
  56. crawlo/stats_collector.py +59 -59
  57. crawlo/subscriber.py +106 -106
  58. crawlo/task_manager.py +27 -27
  59. crawlo/templates/crawlo.cfg.tmpl +10 -10
  60. crawlo/templates/project/__init__.py.tmpl +3 -3
  61. crawlo/templates/project/items.py.tmpl +17 -17
  62. crawlo/templates/project/middlewares.py.tmpl +75 -75
  63. crawlo/templates/project/pipelines.py.tmpl +63 -63
  64. crawlo/templates/project/settings.py.tmpl +54 -54
  65. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  66. crawlo/templates/spider/spider.py.tmpl +31 -31
  67. crawlo/utils/__init__.py +7 -7
  68. crawlo/utils/date_tools.py +233 -233
  69. crawlo/utils/db_helper.py +343 -343
  70. crawlo/utils/func_tools.py +82 -82
  71. crawlo/utils/log.py +128 -128
  72. crawlo/utils/pqueue.py +173 -173
  73. crawlo/utils/request.py +267 -267
  74. crawlo/utils/spider_loader.py +62 -62
  75. crawlo/utils/system.py +11 -11
  76. crawlo/utils/tools.py +4 -4
  77. crawlo/utils/url.py +39 -39
  78. crawlo-1.1.1.dist-info/METADATA +220 -0
  79. crawlo-1.1.1.dist-info/RECORD +100 -0
  80. examples/__init__.py +7 -0
  81. examples/baidu_spider/__init__.py +7 -0
  82. examples/baidu_spider/demo.py +94 -0
  83. examples/baidu_spider/items.py +46 -0
  84. examples/baidu_spider/middleware.py +49 -0
  85. examples/baidu_spider/pipeline.py +55 -0
  86. examples/baidu_spider/run.py +27 -0
  87. examples/baidu_spider/settings.py +121 -0
  88. examples/baidu_spider/spiders/__init__.py +7 -0
  89. examples/baidu_spider/spiders/bai_du.py +61 -0
  90. examples/baidu_spider/spiders/miit.py +159 -0
  91. examples/baidu_spider/spiders/sina.py +79 -0
  92. tests/__init__.py +7 -7
  93. tests/test_proxy_health_check.py +32 -32
  94. tests/test_proxy_middleware_integration.py +136 -136
  95. tests/test_proxy_providers.py +56 -56
  96. tests/test_proxy_stats.py +19 -19
  97. tests/test_proxy_strategies.py +59 -59
  98. crawlo/utils/concurrency_manager.py +0 -125
  99. crawlo/utils/project.py +0 -197
  100. crawlo-1.1.0.dist-info/METADATA +0 -49
  101. crawlo-1.1.0.dist-info/RECORD +0 -97
  102. examples/gxb/__init__.py +0 -0
  103. examples/gxb/items.py +0 -36
  104. examples/gxb/run.py +0 -16
  105. examples/gxb/settings.py +0 -72
  106. examples/gxb/spider/__init__.py +0 -2
  107. examples/gxb/spider/miit_spider.py +0 -180
  108. examples/gxb/spider/telecom_device.py +0 -129
  109. {crawlo-1.1.0.dist-info → crawlo-1.1.1.dist-info}/WHEEL +0 -0
  110. {crawlo-1.1.0.dist-info → crawlo-1.1.1.dist-info}/entry_points.txt +0 -0
  111. {crawlo-1.1.0.dist-info → crawlo-1.1.1.dist-info}/top_level.txt +0 -0
crawlo/spider/__init__.py CHANGED
@@ -1,129 +1,129 @@
(both sides of this hunk are textually identical: all 129 lines were removed and re-added, a whitespace/line-ending-only change, so the file is shown once)

#!/usr/bin/python
# -*- coding:UTF-8 -*-
from __future__ import annotations
from typing import Type, Any, Optional, List, Dict
from ..network.request import Request
from ..utils.log import get_logger


# Global registry
_DEFAULT_SPIDER_REGISTRY: dict[str, Type[Spider]] = {}


class SpiderMeta(type):
    def __new__(mcs, name: str, bases: tuple[type], namespace: dict[str, Any], **kwargs):
        cls = super().__new__(mcs, name, bases, namespace)

        is_spider_subclass = any(
            base is Spider or (isinstance(base, type) and issubclass(base, Spider))
            for base in bases
        )
        if not is_spider_subclass:
            return cls

        spider_name = namespace.get('name')
        if not isinstance(spider_name, str):
            raise AttributeError(f"Spider class '{cls.__name__}' must define a string 'name' attribute.")

        if spider_name in _DEFAULT_SPIDER_REGISTRY:
            raise ValueError(
                f"Spider name '{spider_name}' is already used by {_DEFAULT_SPIDER_REGISTRY[spider_name].__name__}. "
                f"Make sure every spider's name attribute is globally unique."
            )

        _DEFAULT_SPIDER_REGISTRY[spider_name] = cls
        get_logger(__name__).debug(f"Auto-registered spider: {spider_name} -> {cls.__name__}")

        return cls


class Spider(metaclass=SpiderMeta):
    name: str = None

    def __init__(self, name=None, **kwargs):
        if not hasattr(self, 'start_urls'):
            self.start_urls = []
        self.crawler = None
        self.name = name or self.name
        self.logger = get_logger(self.name or self.__class__.__name__)

    @classmethod
    def create_instance(cls, crawler) -> Spider:
        o = cls()
        o.crawler = crawler
        return o

    def start_requests(self):
        if self.start_urls:
            for url in self.start_urls:
                yield Request(url=url, dont_filter=True)
        else:
            if hasattr(self, 'start_url') and isinstance(getattr(self, 'start_url'), str):
                yield Request(getattr(self, 'start_url'), dont_filter=True)

    def parse(self, response):
        raise NotImplementedError

    async def spider_opened(self):
        pass

    async def spider_closed(self):
        pass

    def __str__(self):
        return self.__class__.__name__


# === Public read-only interface ===
def get_global_spider_registry() -> dict[str, Type[Spider]]:
    return _DEFAULT_SPIDER_REGISTRY.copy()


def get_spider_by_name(name: str) -> Optional[Type[Spider]]:
    return _DEFAULT_SPIDER_REGISTRY.get(name)


def get_all_spider_classes() -> list[Type[Spider]]:
    return list(set(_DEFAULT_SPIDER_REGISTRY.values()))

# #!/usr/bin/python
# # -*- coding:UTF-8 -*-
# from ..network.request import Request
# from ..utils.log import get_logger
#
#
# class Spider(object):
#     name = None
#
#     def __init__(self, name=None, **kwargs):
#         if not hasattr(self, 'start_urls'):
#             self.start_urls = []
#         self.crawler = None
#         self.name = name or self.name
#         self.logger = get_logger(self.name or self.__class__.__name__)
#
#     @classmethod
#     def create_instance(cls, crawler):
#         o = cls()
#         o.crawler = crawler
#         return o
#
#     def start_requests(self):
#         if self.start_urls:
#             for url in self.start_urls:
#                 yield Request(url=url, dont_filter=True)
#         else:
#             if hasattr(self, 'start_url') and isinstance(getattr(self, 'start_url'), str):
#                 yield Request(getattr(self, 'start_url'), dont_filter=True)
#
#     def parse(self, response):
#         raise NotImplementedError
#
#     async def spider_opened(self):
#         pass
#
#     async def spider_closed(self):
#         pass
#
#     def __str__(self):
#         return self.__class__.__name__
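
Usage sketch (not part of the package diff): with the SpiderMeta shown above, the class statement alone is enough to register a spider. The spider name 'demo' and the start URL below are hypothetical.

from crawlo.spider import Spider, get_spider_by_name, get_global_spider_registry

class DemoSpider(Spider):
    name = 'demo'  # must be a globally unique string, or SpiderMeta raises at class-creation time
    start_urls = ['https://example.com']  # hypothetical start URL

    def parse(self, response):
        yield {'url': response.url}

# Registration happened as a side effect of SpiderMeta.__new__
assert get_spider_by_name('demo') is DemoSpider
assert 'demo' in get_global_spider_registry()

Note that create_instance(), not __init__, is what attaches the crawler, so the framework can construct spiders without user boilerplate.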
crawlo/stats_collector.py CHANGED
@@ -1,59 +1,59 @@
(both sides of this hunk are textually identical: all 59 lines were removed and re-added, a whitespace/line-ending-only change, so the file is shown once)

#!/usr/bin/python
# -*- coding:UTF-8 -*-
"""
# @Time   : 2025-05-17 09:57
# @Author : crawl-coder
# @Desc   : Stats collector
"""
from pprint import pformat
from crawlo.utils.log import get_logger


class StatsCollector(object):

    def __init__(self, crawler):
        self.crawler = crawler
        self._dump = self.crawler.settings.get_bool('STATS_DUMP')
        self._stats = {}
        self.logger = get_logger(self.__class__.__name__, "INFO")

    def inc_value(self, key, count=1, start=0):
        self._stats[key] = self._stats.setdefault(key, start) + count

    def get_value(self, key, default=None):
        return self._stats.get(key, default)

    def get_stats(self):
        return self._stats

    def set_stats(self, stats):
        self._stats = stats

    def clear_stats(self):
        self._stats.clear()

    def close_spider(self, spider, reason):
        self._stats['reason'] = reason

        # Prefer spider.name;
        # fall back to the instance's class name;
        # as a last resort, use an unknown placeholder.
        spider_name = (
            getattr(spider, 'name', None) or
            spider.__class__.__name__ or
            '<Unknown>'
        )

        self._stats['spider_name'] = spider_name

        if self._dump:
            self.logger.info(f'{spider_name} stats: \n{pformat(self._stats)}')

    def __getitem__(self, item):
        return self._stats[item]

    def __setitem__(self, key, value):
        self._stats[key] = value

    def __delitem__(self, key):
        del self._stats[key]
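
A minimal sketch of driving StatsCollector directly; the _Settings/_Crawler stubs below are hypothetical stand-ins, kept just large enough to satisfy the settings.get_bool('STATS_DUMP') call in __init__.

from crawlo.stats_collector import StatsCollector

class _Settings:  # hypothetical stub
    def get_bool(self, key):
        return True  # enable stats dumping on close_spider

class _Crawler:  # hypothetical stub
    settings = _Settings()

stats = StatsCollector(_Crawler())
stats.inc_value('request_count')           # 0 -> 1 (default start=0)
stats.inc_value('request_count', count=2)  # 1 -> 3
stats['custom/flag'] = True                # dict-style access via __setitem__
print(stats.get_value('request_count'))    # 3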
crawlo/subscriber.py CHANGED
@@ -1,106 +1,106 @@
(both sides of this hunk are textually identical: all 106 lines were removed and re-added, a whitespace/line-ending-only change, so the file is shown once)

#!/usr/bin/python
# -*- coding:UTF-8 -*-
import asyncio
from collections import defaultdict
from inspect import iscoroutinefunction
from typing import Dict, Set, Callable, Coroutine, Any, TypeAlias, List


class ReceiverTypeError(TypeError):
    """Raised when a subscribed receiver is not a coroutine function."""
    pass


ReceiverCoroutine: TypeAlias = Callable[..., Coroutine[Any, Any, Any]]


class Subscriber:
    """
    A publish/subscribe (Pub/Sub) implementation with async coroutine support.

    This class lets you register (subscribe) coroutine functions that listen
    for specific events, and notifies all subscribers asynchronously and
    concurrently when an event fires.
    """

    def __init__(self):
        """Initialize an empty subscriber dictionary."""
        self._subscribers: Dict[str, Set[ReceiverCoroutine]] = defaultdict(set)

    def subscribe(self, receiver: ReceiverCoroutine, *, event: str) -> None:
        """
        Subscribe to an event.

        Args:
            receiver: A coroutine function (e.g. async def my_func(...)).
            event: The name of the event to subscribe to.

        Raises:
            ReceiverTypeError: If the given `receiver` is not a coroutine function.
        """
        if not iscoroutinefunction(receiver):
            raise ReceiverTypeError(f"Receiver '{receiver.__qualname__}' must be a coroutine function.")
        self._subscribers[event].add(receiver)

    def unsubscribe(self, receiver: ReceiverCoroutine, *, event: str) -> None:
        """
        Unsubscribe from an event.

        Silently ignored if the event or receiver does not exist.

        Args:
            receiver: The coroutine function to unsubscribe.
            event: The event name.
        """
        if event in self._subscribers:
            self._subscribers[event].discard(receiver)

    async def notify(self, event: str, *args, **kwargs) -> List[Any]:
        """
        Asynchronously and concurrently notify every receiver subscribed to the event.

        This method waits for all subscriber tasks to finish before returning,
        collecting every result or exception.

        Args:
            event: The name of the event to fire.
            *args: Positional arguments passed to the receivers.
            **kwargs: Keyword arguments passed to the receivers.

        Returns:
            A list containing each subscriber task's return value or the
            exception caught during its execution.
        """
        receivers = self._subscribers.get(event, set())
        if not receivers:
            return []

        tasks = [asyncio.create_task(receiver(*args, **kwargs)) for receiver in receivers]

        # Run all tasks concurrently and return the result list (including exceptions)
        return await asyncio.gather(*tasks, return_exceptions=True)

# #!/usr/bin/python
# # -*- coding:UTF-8 -*-
# import asyncio
# from collections import defaultdict
# from inspect import iscoroutinefunction
# from typing import Dict, Set, Callable, Coroutine
#
# from crawlo.exceptions import ReceiverTypeError
#
#
# class Subscriber:
#
#     def __init__(self):
#         self._subscribers: Dict[str, Set[Callable[..., Coroutine]]] = defaultdict(set)
#
#     def subscribe(self, receiver: Callable[..., Coroutine], *, event: str) -> None:
#         if not iscoroutinefunction(receiver):
#             raise ReceiverTypeError(f"{receiver.__qualname__} must be a coroutine function")
#         self._subscribers[event].add(receiver)
#
#     def unsubscribe(self, receiver: Callable[..., Coroutine], *, event: str) -> None:
#         self._subscribers[event].discard(receiver)
#
#     async def notify(self, event: str, *args, **kwargs) -> None:
#         for receiver in self._subscribers[event]:
#             # cannot await here
#             asyncio.create_task(receiver(*args, **kwargs))
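
A minimal usage sketch for the Subscriber above; the event name 'item_scraped' and the receiver are hypothetical.

import asyncio
from crawlo.subscriber import Subscriber

async def on_item_scraped(item):  # receivers must be coroutine functions
    return f'saved {item!r}'

async def main():
    bus = Subscriber()
    bus.subscribe(on_item_scraped, event='item_scraped')
    # notify() awaits all receivers concurrently and collects their results,
    # with exceptions returned in-band thanks to return_exceptions=True
    results = await bus.notify('item_scraped', {'title': 'demo'})
    print(results)

asyncio.run(main())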
crawlo/task_manager.py CHANGED
@@ -1,27 +1,27 @@
(both sides of this hunk are textually identical: all 27 lines were removed and re-added, a whitespace/line-ending-only change, so the file is shown once)

#!/usr/bin/python
# -*- coding:UTF-8 -*-
import asyncio
from asyncio import Task, Future, Semaphore
from typing import Set, Final


class TaskManager:

    def __init__(self, total_concurrency: int = 8):
        self.current_task: Final[Set] = set()
        self.semaphore: Semaphore = Semaphore(total_concurrency)

    def create_task(self, coroutine) -> Task:
        task = asyncio.create_task(coroutine)
        self.current_task.add(task)

        def done_callback(_future: Future) -> None:
            self.current_task.remove(task)
            self.semaphore.release()

        task.add_done_callback(done_callback)

        return task

    def all_done(self) -> bool:
        return len(self.current_task) == 0
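
A minimal sketch of TaskManager; note that create_task() releases the semaphore in its done-callback but never acquires it, so the matching acquire presumably happens in the caller (the engine, in crawlo). This sketch acquires it explicitly.

import asyncio
from crawlo.task_manager import TaskManager

async def fetch(i):  # hypothetical unit of work
    await asyncio.sleep(0.1)
    return i

async def main():
    tm = TaskManager(total_concurrency=2)
    for i in range(4):
        await tm.semaphore.acquire()  # cap in-flight tasks at total_concurrency
        tm.create_task(fetch(i))
    while not tm.all_done():          # poll until the task set drains
        await asyncio.sleep(0.05)

asyncio.run(main())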
crawlo/templates/crawlo.cfg.tmpl CHANGED
@@ -1,11 +1,11 @@
(lines 1-10 are removed and re-added with identical text, a whitespace/line-ending-only change; line 11 is unchanged context; the file is shown once)

# crawlo.cfg
# The project's configuration file.

[settings]
# Import path of the settings module
default = {{project_name}}.settings

[deploy]
# (Optional) deployment configuration
# url = http://localhost:6800/
# project = {{project_name}}
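
Rendered with a hypothetical project name of 'myproject', the template above would produce a crawlo.cfg like this:

# crawlo.cfg
# The project's configuration file.

[settings]
default = myproject.settings

[deploy]
# url = http://localhost:6800/
# project = myproject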