crawlo 1.0.1-py3-none-any.whl → 1.0.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (80)
  1. crawlo/__init__.py +9 -6
  2. crawlo/__version__.py +1 -2
  3. crawlo/core/__init__.py +2 -2
  4. crawlo/core/engine.py +158 -158
  5. crawlo/core/processor.py +40 -40
  6. crawlo/core/scheduler.py +57 -59
  7. crawlo/crawler.py +242 -107
  8. crawlo/downloader/__init__.py +78 -78
  9. crawlo/downloader/aiohttp_downloader.py +259 -96
  10. crawlo/downloader/httpx_downloader.py +187 -48
  11. crawlo/downloader/playwright_downloader.py +160 -160
  12. crawlo/event.py +11 -11
  13. crawlo/exceptions.py +64 -64
  14. crawlo/extension/__init__.py +31 -31
  15. crawlo/extension/log_interval.py +49 -49
  16. crawlo/extension/log_stats.py +44 -44
  17. crawlo/filters/__init__.py +37 -37
  18. crawlo/filters/aioredis_filter.py +157 -129
  19. crawlo/filters/memory_filter.py +202 -203
  20. crawlo/filters/redis_filter.py +119 -119
  21. crawlo/items/__init__.py +62 -62
  22. crawlo/items/items.py +118 -118
  23. crawlo/middleware/__init__.py +21 -21
  24. crawlo/middleware/default_header.py +32 -32
  25. crawlo/middleware/download_delay.py +28 -28
  26. crawlo/middleware/middleware_manager.py +140 -140
  27. crawlo/middleware/request_ignore.py +30 -30
  28. crawlo/middleware/response_code.py +18 -18
  29. crawlo/middleware/response_filter.py +26 -26
  30. crawlo/middleware/retry.py +90 -89
  31. crawlo/network/__init__.py +7 -7
  32. crawlo/network/request.py +205 -155
  33. crawlo/network/response.py +166 -93
  34. crawlo/pipelines/__init__.py +13 -13
  35. crawlo/pipelines/console_pipeline.py +39 -39
  36. crawlo/pipelines/mongo_pipeline.py +116 -116
  37. crawlo/pipelines/mysql_batch_pipline.py +133 -133
  38. crawlo/pipelines/mysql_pipeline.py +195 -176
  39. crawlo/pipelines/pipeline_manager.py +56 -56
  40. crawlo/settings/__init__.py +7 -7
  41. crawlo/settings/default_settings.py +93 -89
  42. crawlo/settings/setting_manager.py +99 -99
  43. crawlo/spider/__init__.py +36 -36
  44. crawlo/stats_collector.py +59 -47
  45. crawlo/subscriber.py +106 -27
  46. crawlo/task_manager.py +27 -27
  47. crawlo/templates/item_template.tmpl +21 -21
  48. crawlo/templates/project_template/main.py +32 -32
  49. crawlo/templates/project_template/setting.py +189 -189
  50. crawlo/templates/spider_template.tmpl +30 -30
  51. crawlo/utils/__init__.py +7 -7
  52. crawlo/utils/concurrency_manager.py +125 -0
  53. crawlo/utils/date_tools.py +177 -177
  54. crawlo/utils/func_tools.py +82 -82
  55. crawlo/utils/log.py +39 -39
  56. crawlo/utils/pqueue.py +173 -173
  57. crawlo/utils/project.py +59 -59
  58. crawlo/utils/request.py +122 -85
  59. crawlo/utils/system.py +11 -11
  60. crawlo/utils/tools.py +303 -0
  61. crawlo/utils/url.py +39 -39
  62. {crawlo-1.0.1.dist-info → crawlo-1.0.3.dist-info}/METADATA +48 -36
  63. crawlo-1.0.3.dist-info/RECORD +80 -0
  64. {crawlo-1.0.1.dist-info → crawlo-1.0.3.dist-info}/top_level.txt +1 -0
  65. tests/__init__.py +7 -0
  66. tests/baidu_spider/__init__.py +7 -0
  67. tests/baidu_spider/demo.py +94 -0
  68. tests/baidu_spider/items.py +25 -0
  69. tests/baidu_spider/middleware.py +49 -0
  70. tests/baidu_spider/pipeline.py +55 -0
  71. tests/baidu_spider/request_fingerprints.txt +9 -0
  72. tests/baidu_spider/run.py +27 -0
  73. tests/baidu_spider/settings.py +78 -0
  74. tests/baidu_spider/spiders/__init__.py +7 -0
  75. tests/baidu_spider/spiders/bai_du.py +61 -0
  76. tests/baidu_spider/spiders/sina.py +79 -0
  77. crawlo-1.0.1.dist-info/RECORD +0 -67
  78. crawlo-1.0.1.dist-info/licenses/LICENSE +0 -23
  79. {crawlo-1.0.1.dist-info → crawlo-1.0.3.dist-info}/WHEEL +0 -0
  80. {crawlo-1.0.1.dist-info → crawlo-1.0.3.dist-info}/entry_points.txt +0 -0
crawlo/crawler.py CHANGED
@@ -1,107 +1,242 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*
- import signal
- import asyncio
- from typing import Type, Final, Set, Optional
-
- from crawlo.spider import Spider
- from crawlo.core.engine import Engine
- from crawlo.subscriber import Subscriber
-
- from crawlo.utils.log import get_logger
- from crawlo.extension import ExtensionManager
- from crawlo.exceptions import SpiderTypeError
- from crawlo.utils.project import merge_settings
- from crawlo.stats_collector import StatsCollector
- from crawlo.event import spider_opened, spider_closed
- from crawlo.settings.setting_manager import SettingManager
-
- logger = get_logger(__name__)
-
-
- class Crawler:
-
-     def __init__(self, spider_cls, settings):
-         self.spider_cls = spider_cls
-         self.spider: Optional[Spider] = None
-         self.engine: Optional[Engine] = None
-         self.stats: Optional[StatsCollector] = None
-         self.subscriber: Optional[Subscriber] = None
-         self.extension: Optional[ExtensionManager] = None
-         self.settings: SettingManager = settings.copy()
-
-     async def crawl(self):
-         self.subscriber = self._create_subscriber()
-         self.spider = self._create_spider()
-         self.engine = self._create_engine()
-         self.stats = self._create_stats()
-         self.extension = self._create_extension()
-
-         await self.engine.start_spider(self.spider)
-
-     @staticmethod
-     def _create_subscriber():
-         return Subscriber()
-
-     def _create_spider(self) -> Spider:
-         spider = self.spider_cls.create_instance(self)
-         self._set_spider(spider)
-         return spider
-
-     def _create_engine(self) -> Engine:
-         engine = Engine(self)
-         engine.engine_start()
-         return engine
-
-     def _create_stats(self) -> StatsCollector:
-         stats = StatsCollector(self)
-         return stats
-
-     def _create_extension(self) -> ExtensionManager:
-         extension = ExtensionManager.create_instance(self)
-         return extension
-
-     def _set_spider(self, spider):
-         self.subscriber.subscribe(spider.spider_opened, event=spider_opened)
-         self.subscriber.subscribe(spider.spider_closed, event=spider_closed)
-         merge_settings(spider, self.settings)
-
-     async def close(self, reason='finished') -> None:
-         await asyncio.create_task(self.subscriber.notify(spider_closed))
-         self.stats.close_spider(spider_name=self.spider, reason=reason)
-
-
- class CrawlerProcess:
-
-     def __init__(self, settings=None):
-         self.crawlers: Final[Set] = set()
-         self._active_spiders: Final[Set] = set()
-         self.settings = settings
-
-         signal.signal(signal.SIGINT, self._shutdown)
-
-     async def crawl(self, spider: Type[Spider]):
-         crawler: Crawler = self._create_crawler(spider)
-         self.crawlers.add(crawler)
-         task = await self._crawl(crawler)
-         self._active_spiders.add(task)
-
-     @staticmethod
-     async def _crawl(crawler):
-         return asyncio.create_task(crawler.crawl())
-
-     async def start(self):
-         await asyncio.gather(*self._active_spiders)
-
-     def _create_crawler(self, spider_cls) -> Crawler:
-         if isinstance(spider_cls, str):
-             raise SpiderTypeError(f"{type(self)}.crawl args: String is not supported.")
-         crawler: Crawler = Crawler(spider_cls, self.settings)
-         return crawler
-
-     def _shutdown(self, _signum, _frame):
-         for crawler in self.crawlers:
-             crawler.engine.running = False
-             crawler.engine.normal = False
-             crawler.stats.close_spider(crawler.spider, 'Ctrl C')
-         logger.warning(f'spiders received: `Ctrl C` signal, closed.')
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*
+ import signal
+ import asyncio
+ from typing import Type, Final, Set, Optional
+
+ from crawlo.spider import Spider
+ from crawlo.core.engine import Engine
+ from crawlo.utils.log import get_logger
+ from crawlo.subscriber import Subscriber
+ from crawlo.extension import ExtensionManager
+ from crawlo.exceptions import SpiderTypeError
+ from crawlo.stats_collector import StatsCollector
+ from crawlo.event import spider_opened, spider_closed
+ from crawlo.settings.setting_manager import SettingManager
+ from crawlo.utils.project import merge_settings, get_settings
+ from crawlo.utils.concurrency_manager import calculate_optimal_concurrency
+
+ logger = get_logger(__name__)
+
+
+ class Crawler:
+
+     def __init__(self, spider_cls, settings):
+         self.spider_cls = spider_cls
+         self.spider: Optional[Spider] = None
+         self.engine: Optional[Engine] = None
+         self.stats: Optional[StatsCollector] = None
+         self.subscriber: Optional[Subscriber] = None
+         self.extension: Optional[ExtensionManager] = None
+         self.settings: SettingManager = settings.copy()
+
+     async def crawl(self):
+         self.subscriber = self._create_subscriber()
+         self.spider = self._create_spider()
+         self.engine = self._create_engine()
+         self.stats = self._create_stats()
+         self.extension = self._create_extension()
+
+         await self.engine.start_spider(self.spider)
+
+     @staticmethod
+     def _create_subscriber():
+         return Subscriber()
+
+     def _create_spider(self) -> Spider:
+         spider = self.spider_cls.create_instance(self)
+
+         # --- critical attribute checks ---
+         # 1. check name
+         if not getattr(spider, 'name', None):
+             raise AttributeError(f"Spider class '{self.spider_cls.__name__}' must have a 'name' attribute.")
+
+         # 2. check that start_requests is callable
+         if not callable(getattr(spider, 'start_requests', None)):
+             raise AttributeError(f"Spider '{spider.name}' must have a callable 'start_requests' method.")
+
+         # 3. check the type of start_urls
+         start_urls = getattr(spider, 'start_urls', [])
+         if isinstance(start_urls, str):
+             raise TypeError(f"'{spider.name}.start_urls' must be a list or tuple, not a string.")
+
+         # --- logging hints ---
+         # remind the user to define a parse method
+         if not callable(getattr(spider, 'parse', None)):
+             logger.warning(f"Spider '{spider.name}' lacks a 'parse' method. Ensure all Requests have callbacks.")
+
+         self._set_spider(spider)
+         return spider
+
+     def _create_engine(self) -> Engine:
+         engine = Engine(self)
+         engine.engine_start()
+         return engine
+
+     def _create_stats(self) -> StatsCollector:
+         stats = StatsCollector(self)
+         return stats
+
+     def _create_extension(self) -> ExtensionManager:
+         extension = ExtensionManager.create_instance(self)
+         return extension
+
+     def _set_spider(self, spider):
+         self.subscriber.subscribe(spider.spider_opened, event=spider_opened)
+         self.subscriber.subscribe(spider.spider_closed, event=spider_closed)
+         merge_settings(spider, self.settings)
+
+     async def close(self, reason='finished') -> None:
+         await asyncio.create_task(self.subscriber.notify(spider_closed))
+         self.stats.close_spider(spider=self.spider, reason=reason)
+
+
+ class CrawlerProcess:
+     """Crawler process class with cross-platform dynamic concurrency control and fine-grained logging."""
+
+     def __init__(self, settings=None, max_concurrency: Optional[int] = None, batch_size: int = 50):
+         self.crawlers: Final[Set] = set()
+         self._active_spiders: Final[Set] = set()
+         self.settings = settings or self._get_default_settings()
+         self.batch_size = batch_size
+
+         # compute the maximum concurrency via the standalone module
+         self.max_concurrency = calculate_optimal_concurrency(max_concurrency)
+         self.semaphore = asyncio.Semaphore(self.max_concurrency)
+
+         signal.signal(signal.SIGINT, self._shutdown)
+         logger.info(f"Initialized crawler process, max concurrency: {self.max_concurrency}")
+
+     async def crawl(self, spiders):
+         """Accept a single spider class or a batch of them, with streamlined logging."""
+         if not spiders:
+             raise ValueError("At least one spider class must be provided")
+
+         # normalize to a list
+         if isinstance(spiders, type) and issubclass(spiders, Spider):
+             spiders = [spiders]
+         elif isinstance(spiders, (list, tuple)):
+             spiders = list(spiders)
+         else:
+             raise TypeError("spiders must be a Spider class or a list/tuple of Spider classes")
+
+         # sort by spider class name (ascending)
+         spiders.sort(key=lambda x: x.__name__.lower())
+
+         if len(spiders) == 1:
+             logger.info(f"Starting spider: {spiders[0].__name__}")
+         else:
+             logger.info(f"Starting {len(spiders)} spiders, sorted by name and processed in batches")
+
+         batches = [spiders[i:i + self.batch_size] for i in range(0, len(spiders), self.batch_size)]
+
+         for batch_idx, batch in enumerate(batches):
+             batch_tasks = set()
+
+             for spider_cls in batch:
+                 crawler = self._create_crawler(spider_cls)
+                 self.crawlers.add(crawler)
+
+                 await self.semaphore.acquire()
+                 task = asyncio.create_task(self._run_crawler_with_semaphore(crawler))
+                 batch_tasks.add(task)
+                 self._active_spiders.add(task)
+
+             if len(spiders) > 1:  # only log batch info when running multiple spiders
+                 logger.info(f"Starting batch {batch_idx + 1}/{len(batches)} with {len(batch)} spider(s)")
+
+             await asyncio.gather(*batch_tasks)
+
+             if len(spiders) > 1:  # only log batch completion when running multiple spiders
+                 logger.info(f"Batch {batch_idx + 1} finished")
+
+     async def _run_crawler_with_semaphore(self, crawler):
+         """Run a crawler under semaphore control."""
+         try:
+             await crawler.crawl()
+         finally:
+             self.semaphore.release()  # make sure the slot is released
+
+     async def start(self):
+         """Start all crawler tasks."""
+         if self._active_spiders:
+             logger.info(f"Starting {len(self._active_spiders)} crawler task(s), concurrency limit: {self.max_concurrency}")
+         await asyncio.gather(*self._active_spiders)
+
+     def _create_crawler(self, spider_cls) -> Crawler:
+         """Create a Crawler instance."""
+         if isinstance(spider_cls, str):
+             raise SpiderTypeError(f"{type(self)}.crawl args: String is not supported.")
+         crawler: Crawler = Crawler(spider_cls, self.settings)
+         return crawler
+
+     def _shutdown(self, _signum, _frame):
+         """Gracefully shut down all crawlers."""
+         logger.warning(f"Received shutdown signal, gracefully closing {len(self.crawlers)} crawler(s)...")
+         for crawler in self.crawlers:
+             if crawler.engine:
+                 crawler.engine.running = False
+                 crawler.engine.normal = False
+             crawler.stats.close_spider(crawler.spider, 'shutdown signal')
+
+         # wait for all tasks to finish
+         asyncio.create_task(self._wait_for_tasks())
+
+     async def _wait_for_tasks(self):
+         """Wait for all active tasks to complete."""
+         pending = [task for task in self._active_spiders if not task.done()]
+         if pending:
+             logger.info(f"Waiting for {len(pending)} active task(s) to finish...")
+             await asyncio.gather(*pending)
+         logger.info("All crawlers shut down gracefully")
+
+     @classmethod
+     def _get_default_settings(cls):
+         """Automatically pick up the framework's default settings."""
+         try:
+             return get_settings()
+         except ImportError:
+             return {}
+
+ # class CrawlerProcess:
+ #
+ #     def __init__(self, settings=None):
+ #         self.crawlers: Final[Set] = set()
+ #         self._active_spiders: Final[Set] = set()
+ #         self.settings = settings or self._get_default_settings()
+ #
+ #         signal.signal(signal.SIGINT, self._shutdown)
+ #
+ #     async def crawl(self, spider: Type[Spider]):
+ #         crawler: Crawler = self._create_crawler(spider)
+ #         self.crawlers.add(crawler)
+ #         task = await self._crawl(crawler)
+ #         self._active_spiders.add(task)
+ #
+ #     @classmethod
+ #     def _get_default_settings(cls):
+ #         """Fetch the default settings automatically."""
+ #         try:
+ #             return get_settings()
+ #         except ImportError:
+ #             return {}
+ #
+ #     @staticmethod
+ #     async def _crawl(crawler):
+ #         return asyncio.create_task(crawler.crawl())
+ #
+ #     async def start(self):
+ #         await asyncio.gather(*self._active_spiders)
+ #
+ #     def _create_crawler(self, spider_cls) -> Crawler:
+ #         if isinstance(spider_cls, str):
+ #             raise SpiderTypeError(f"{type(self)}.crawl args: String is not supported.")
+ #         crawler: Crawler = Crawler(spider_cls, self.settings)
+ #         return crawler
+ #
+ #     def _shutdown(self, _signum, _frame):
+ #         for crawler in self.crawlers:
+ #             crawler.engine.running = False
+ #             crawler.engine.normal = False
+ #             crawler.stats.close_spider(crawler.spider, 'Ctrl C')
+ #         logger.warning(f'spiders received: `Ctrl C` signal, closed.')
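
Note: the new `CrawlerProcess` replaces the single-spider `crawl()` API with batch scheduling: spiders are sorted by class name, split into `batch_size` groups, and each crawl task must acquire an `asyncio.Semaphore` slot sized by `calculate_optimal_concurrency()` before it runs. The sketch below shows one way this could be used, based only on the code in the diff above. The `QuotesSpider` class and its URL are hypothetical, and it assumes the crawlo `Spider` base class provides a default `start_requests()` that turns `start_urls` into requests and routes responses to `parse()`, which this diff does not confirm.

```python
# Hedged usage sketch for the 1.0.3 CrawlerProcess; QuotesSpider is a made-up example.
import asyncio

from crawlo.spider import Spider
from crawlo.crawler import CrawlerProcess


class QuotesSpider(Spider):  # hypothetical spider, for illustration only
    name = "quotes"  # required: _create_spider() raises AttributeError without it
    start_urls = ["https://quotes.toscrape.com/"]  # must be a list/tuple, not a str

    def parse(self, response):
        # _create_spider() only warns (does not fail) when parse() is missing
        print("got response:", response)


async def main():
    # max_concurrency is optional; when left as None,
    # calculate_optimal_concurrency() picks a value.
    process = CrawlerProcess(max_concurrency=4, batch_size=50)
    # crawl() accepts a single Spider class or a list/tuple of them,
    # sorts them by class name, and runs them batch by batch.
    await process.crawl([QuotesSpider])


if __name__ == "__main__":
    asyncio.run(main())
```

Because `crawl()` already awaits each batch's tasks, the sketch does not call `start()` afterwards; `start()` appears to matter only if tasks are queued without awaiting `crawl()` to completion.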
crawlo/downloader/__init__.py CHANGED
@@ -1,78 +1,78 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- from abc import abstractmethod, ABCMeta
- from typing_extensions import Self
- from typing import Final, Set, Optional
- from contextlib import asynccontextmanager
-
- from crawlo import Response, Request
- from crawlo.utils.log import get_logger
- from crawlo.middleware.middleware_manager import MiddlewareManager
-
-
- class ActivateRequestManager:
-
-     def __init__(self):
-         self._active: Final[Set] = set()
-
-     def add(self, request):
-         self._active.add(request)
-
-     def remove(self, request):
-         self._active.remove(request)
-
-     @asynccontextmanager
-     async def __call__(self, request):
-         try:
-             yield self.add(request)
-         finally:
-             self.remove(request)
-
-     def __len__(self):
-         return len(self._active)
-
-
- class DownloaderMeta(ABCMeta):
-     def __subclasscheck__(self, subclass):
-         required_methods = ('fetch', 'download', 'create_instance', 'close')
-         is_subclass = all(
-             hasattr(subclass, method) and callable(getattr(subclass, method, None)) for method in required_methods
-         )
-         return is_subclass
-
-
- class DownloaderBase(metaclass=DownloaderMeta):
-     def __init__(self, crawler):
-         self.crawler = crawler
-         self._active = ActivateRequestManager()
-         self.middleware: Optional[MiddlewareManager] = None
-         self.logger = get_logger(self.__class__.__name__, crawler.settings.get("LOG_LEVEL"))
-
-     @classmethod
-     def create_instance(cls, *args, **kwargs) -> Self:
-         return cls(*args, **kwargs)
-
-     def open(self) -> None:
-         self.logger.info(
-             f"{self.crawler.spider} <downloader class:{type(self).__name__}>"
-             f"<concurrency:{self.crawler.settings.get_int('CONCURRENCY')}>"
-         )
-         self.middleware = MiddlewareManager.create_instance(self.crawler)
-
-     async def fetch(self, request) -> Optional[Response]:
-         async with self._active(request):
-             response = await self.middleware.download(request)
-             return response
-
-     @abstractmethod
-     async def download(self, request: Request) -> Response:
-         pass
-
-     async def close(self) -> None:
-         pass
-
-     def idle(self) -> bool:
-         return len(self) == 0
-
-     def __len__(self) -> int:
-         return len(self._active)
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ from abc import abstractmethod, ABCMeta
+ from typing_extensions import Self
+ from typing import Final, Set, Optional
+ from contextlib import asynccontextmanager
+
+ from crawlo import Response, Request
+ from crawlo.utils.log import get_logger
+ from crawlo.middleware.middleware_manager import MiddlewareManager
+
+
+ class ActivateRequestManager:
+
+     def __init__(self):
+         self._active: Final[Set] = set()
+
+     def add(self, request):
+         self._active.add(request)
+
+     def remove(self, request):
+         self._active.remove(request)
+
+     @asynccontextmanager
+     async def __call__(self, request):
+         try:
+             yield self.add(request)
+         finally:
+             self.remove(request)
+
+     def __len__(self):
+         return len(self._active)
+
+
+ class DownloaderMeta(ABCMeta):
+     def __subclasscheck__(self, subclass):
+         required_methods = ('fetch', 'download', 'create_instance', 'close')
+         is_subclass = all(
+             hasattr(subclass, method) and callable(getattr(subclass, method, None)) for method in required_methods
+         )
+         return is_subclass
+
+
+ class DownloaderBase(metaclass=DownloaderMeta):
+     def __init__(self, crawler):
+         self.crawler = crawler
+         self._active = ActivateRequestManager()
+         self.middleware: Optional[MiddlewareManager] = None
+         self.logger = get_logger(self.__class__.__name__, crawler.settings.get("LOG_LEVEL"))
+
+     @classmethod
+     def create_instance(cls, *args, **kwargs) -> Self:
+         return cls(*args, **kwargs)
+
+     def open(self) -> None:
+         self.logger.info(
+             f"{self.crawler.spider} <downloader class:{type(self).__name__}>"
+             f"<concurrency:{self.crawler.settings.get_int('CONCURRENCY')}>"
+         )
+         self.middleware = MiddlewareManager.create_instance(self.crawler)
+
+     async def fetch(self, request) -> Optional[Response]:
+         async with self._active(request):
+             response = await self.middleware.download(request)
+             return response
+
+     @abstractmethod
+     async def download(self, request: Request) -> Response:
+         pass
+
+     async def close(self) -> None:
+         pass
+
+     def idle(self) -> bool:
+         return len(self) == 0
+
+     def __len__(self) -> int:
+         return len(self._active)
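
Note: `DownloaderMeta` overrides `__subclasscheck__`, so `issubclass()` is decided structurally: any class exposing callable `fetch`, `download`, `create_instance`, and `close` attributes counts as a `DownloaderBase` subclass, whether or not it actually inherits from it. The sketch below illustrates that duck-typed check only; the `DummyDownloader` class is hypothetical and is not a working downloader, since a real one would also have to construct crawlo `Response` objects, which this diff does not cover.

```python
# Minimal sketch of the structural subclass check in DownloaderMeta.
from crawlo.downloader import DownloaderBase


class DummyDownloader:  # hypothetical: does NOT inherit from DownloaderBase
    @classmethod
    def create_instance(cls, *args, **kwargs):
        return cls()

    async def fetch(self, request):
        return None

    async def download(self, request):
        return None

    async def close(self):
        pass


# True: all four required methods exist and are callable, so the metaclass
# treats DummyDownloader as a DownloaderBase subclass despite no inheritance.
print(issubclass(DummyDownloader, DownloaderBase))

# False: int lacks fetch/download/create_instance/close.
print(issubclass(int, DownloaderBase))
```

This is the same mechanism the downloader registry relies on when swapping in the aiohttp, httpx, or playwright downloaders changed elsewhere in this release: conformance is checked by method presence rather than by inheritance.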