crawlo-1.0.1-py3-none-any.whl → crawlo-1.0.3-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (80)
  1. crawlo/__init__.py +9 -6
  2. crawlo/__version__.py +1 -2
  3. crawlo/core/__init__.py +2 -2
  4. crawlo/core/engine.py +158 -158
  5. crawlo/core/processor.py +40 -40
  6. crawlo/core/scheduler.py +57 -59
  7. crawlo/crawler.py +242 -107
  8. crawlo/downloader/__init__.py +78 -78
  9. crawlo/downloader/aiohttp_downloader.py +259 -96
  10. crawlo/downloader/httpx_downloader.py +187 -48
  11. crawlo/downloader/playwright_downloader.py +160 -160
  12. crawlo/event.py +11 -11
  13. crawlo/exceptions.py +64 -64
  14. crawlo/extension/__init__.py +31 -31
  15. crawlo/extension/log_interval.py +49 -49
  16. crawlo/extension/log_stats.py +44 -44
  17. crawlo/filters/__init__.py +37 -37
  18. crawlo/filters/aioredis_filter.py +157 -129
  19. crawlo/filters/memory_filter.py +202 -203
  20. crawlo/filters/redis_filter.py +119 -119
  21. crawlo/items/__init__.py +62 -62
  22. crawlo/items/items.py +118 -118
  23. crawlo/middleware/__init__.py +21 -21
  24. crawlo/middleware/default_header.py +32 -32
  25. crawlo/middleware/download_delay.py +28 -28
  26. crawlo/middleware/middleware_manager.py +140 -140
  27. crawlo/middleware/request_ignore.py +30 -30
  28. crawlo/middleware/response_code.py +18 -18
  29. crawlo/middleware/response_filter.py +26 -26
  30. crawlo/middleware/retry.py +90 -89
  31. crawlo/network/__init__.py +7 -7
  32. crawlo/network/request.py +205 -155
  33. crawlo/network/response.py +166 -93
  34. crawlo/pipelines/__init__.py +13 -13
  35. crawlo/pipelines/console_pipeline.py +39 -39
  36. crawlo/pipelines/mongo_pipeline.py +116 -116
  37. crawlo/pipelines/mysql_batch_pipline.py +133 -133
  38. crawlo/pipelines/mysql_pipeline.py +195 -176
  39. crawlo/pipelines/pipeline_manager.py +56 -56
  40. crawlo/settings/__init__.py +7 -7
  41. crawlo/settings/default_settings.py +93 -89
  42. crawlo/settings/setting_manager.py +99 -99
  43. crawlo/spider/__init__.py +36 -36
  44. crawlo/stats_collector.py +59 -47
  45. crawlo/subscriber.py +106 -27
  46. crawlo/task_manager.py +27 -27
  47. crawlo/templates/item_template.tmpl +21 -21
  48. crawlo/templates/project_template/main.py +32 -32
  49. crawlo/templates/project_template/setting.py +189 -189
  50. crawlo/templates/spider_template.tmpl +30 -30
  51. crawlo/utils/__init__.py +7 -7
  52. crawlo/utils/concurrency_manager.py +125 -0
  53. crawlo/utils/date_tools.py +177 -177
  54. crawlo/utils/func_tools.py +82 -82
  55. crawlo/utils/log.py +39 -39
  56. crawlo/utils/pqueue.py +173 -173
  57. crawlo/utils/project.py +59 -59
  58. crawlo/utils/request.py +122 -85
  59. crawlo/utils/system.py +11 -11
  60. crawlo/utils/tools.py +303 -0
  61. crawlo/utils/url.py +39 -39
  62. {crawlo-1.0.1.dist-info → crawlo-1.0.3.dist-info}/METADATA +48 -36
  63. crawlo-1.0.3.dist-info/RECORD +80 -0
  64. {crawlo-1.0.1.dist-info → crawlo-1.0.3.dist-info}/top_level.txt +1 -0
  65. tests/__init__.py +7 -0
  66. tests/baidu_spider/__init__.py +7 -0
  67. tests/baidu_spider/demo.py +94 -0
  68. tests/baidu_spider/items.py +25 -0
  69. tests/baidu_spider/middleware.py +49 -0
  70. tests/baidu_spider/pipeline.py +55 -0
  71. tests/baidu_spider/request_fingerprints.txt +9 -0
  72. tests/baidu_spider/run.py +27 -0
  73. tests/baidu_spider/settings.py +78 -0
  74. tests/baidu_spider/spiders/__init__.py +7 -0
  75. tests/baidu_spider/spiders/bai_du.py +61 -0
  76. tests/baidu_spider/spiders/sina.py +79 -0
  77. crawlo-1.0.1.dist-info/RECORD +0 -67
  78. crawlo-1.0.1.dist-info/licenses/LICENSE +0 -23
  79. {crawlo-1.0.1.dist-info → crawlo-1.0.3.dist-info}/WHEEL +0 -0
  80. {crawlo-1.0.1.dist-info → crawlo-1.0.3.dist-info}/entry_points.txt +0 -0
crawlo/__init__.py CHANGED
@@ -1,6 +1,9 @@
-#!/usr/bin/python
-# -*- coding:UTF-8 -*-
-from crawlo.network.request import Request
-from crawlo.network.response import Response
-from crawlo.items.items import Item
-from .__version__ import __version__
+#!/usr/bin/python
+# -*- coding:UTF-8 -*-
+# from crawlo.spider import Spider
+from crawlo.items.items import Item
+from crawlo.network.request import Request
+from crawlo.network.response import Response
+from crawlo.downloader import DownloaderBase
+from crawlo.middleware import BaseMiddleware
+from .__version__ import __version__
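
In 1.0.3 the package root re-exports DownloaderBase and BaseMiddleware alongside Item, Request, and Response, so custom components can be written against top-level imports. A minimal sketch of what that enables; the process_request hook name is an assumption inferred from the middleware modules listed above, not something this diff confirms:

    # Hypothetical usage sketch; assumes BaseMiddleware exposes a process_request hook.
    from crawlo import BaseMiddleware, Request

    class StampHeaderMiddleware(BaseMiddleware):
        def process_request(self, request: Request, spider):
            # Stamp a default header on every outgoing request (headers dict assumed).
            request.headers.setdefault('User-Agent', 'crawlo/1.0.3')
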
crawlo/__version__.py CHANGED
@@ -1,2 +1 @@
-
-__version__ = "1.0.1"
+__version__ = "1.0.3"
crawlo/core/__init__.py CHANGED
@@ -1,2 +1,2 @@
-#!/usr/bin/python
-# -*- coding:UTF-8 -*-
+#!/usr/bin/python
+# -*- coding:UTF-8 -*-
crawlo/core/engine.py CHANGED
@@ -1,159 +1,159 @@
-#!/usr/bin/python
-# -*- coding:UTF-8 -*-
-import asyncio
-from inspect import iscoroutine
-from typing import Optional, Generator, Callable
-
-from crawlo import Request, Item
-from crawlo.spider import Spider
-from crawlo.utils.log import get_logger
-from crawlo.exceptions import OutputError
-from crawlo.core.scheduler import Scheduler
-from crawlo.core.processor import Processor
-from crawlo.task_manager import TaskManager
-from crawlo.utils.project import load_class
-from crawlo.downloader import DownloaderBase
-from crawlo.utils.func_tools import transform
-from crawlo.event import spider_opened, spider_error, request_scheduled
-
-
-class Engine(object):
-
-    def __init__(self, crawler):
-        self.running = False
-        self.normal = True
-        self.crawler = crawler
-        self.settings = crawler.settings
-        self.spider: Optional[Spider] = None
-        self.downloader: Optional[DownloaderBase] = None
-        self.scheduler: Optional[Scheduler] = None
-        self.processor: Optional[Processor] = None
-        self.start_requests: Optional[Generator] = None
-        self.task_manager: Optional[TaskManager] = TaskManager(self.settings.get_int('CONCURRENCY'))
-
-        self.logger = get_logger(name=self.__class__.__name__)
-
-    def _get_downloader_cls(self):
-        downloader_cls = load_class(self.settings.get('DOWNLOADER'))
-        if not issubclass(downloader_cls, DownloaderBase):
-            raise TypeError(f'Downloader {downloader_cls.__name__} is not subclass of DownloaderBase.')
-        return downloader_cls
-
-    def engine_start(self):
-        self.running = True
-        self.logger.info(
-            f"Crawlo (version {self.settings.get_int('VERSION')}) started. "
-            f"(project name : {self.settings.get('PROJECT_NAME')})"
-        )
-
-    async def start_spider(self, spider):
-        self.spider = spider
-
-        self.scheduler = Scheduler.create_instance(self.crawler)
-        if hasattr(self.scheduler, 'open'):
-            self.scheduler.open()
-
-        downloader_cls = self._get_downloader_cls()
-        self.downloader = downloader_cls(self.crawler)
-        if hasattr(self.downloader, 'open'):
-            self.downloader.open()
-
-        self.processor = Processor(self.crawler)
-        if hasattr(self.processor, 'open'):
-            self.processor.open()
-
-        self.start_requests = iter(spider.start_requests())
-        await self._open_spider()
-
-    async def crawl(self):
-        """
-        Crawl the spider
-        """
-        while self.running:
-            if request := await self._get_next_request():
-                await self._crawl(request)
-            try:
-                start_request = next(self.start_requests)
-            except StopIteration:
-                self.start_requests = None
-            except Exception as exp:
-                # 1. all dispatched requests have finished
-                # 2. the scheduler is idle
-                # 3. the downloader is idle
-                if not await self._exit():
-                    continue
-                self.running = False
-                if self.start_requests is not None:
-                    self.logger.error(f"Error while starting requests: {str(exp)}")
-            else:
-                # enqueue the request
-                await self.enqueue_request(start_request)
-
-        if not self.running:
-            await self.close_spider()
-
-    async def _open_spider(self):
-        asyncio.create_task(self.crawler.subscriber.notify(spider_opened))
-        crawling = asyncio.create_task(self.crawl())
-        await crawling
-
-    async def _crawl(self, request):
-        # TODO implement concurrency
-        async def crawl_task():
-            outputs = await self._fetch(request)
-            # TODO handle output
-            if outputs:
-                await self._handle_spider_output(outputs)
-
-        # asyncio.create_task(crawl_task())
-        self.task_manager.create_task(crawl_task())
-
-    async def _fetch(self, request):
-        async def _successful(_response):
-            callback: Callable = request.callback or self.spider.parse
-            if _outputs := callback(_response):
-                if iscoroutine(_outputs):
-                    await _outputs
-                else:
-                    return transform(_outputs, _response)
-
-        _response = await self.downloader.fetch(request)
-        if _response is None:
-            return None
-        output = await _successful(_response)
-        return output
-
-    async def enqueue_request(self, start_request):
-        await self._schedule_request(start_request)
-
-    async def _schedule_request(self, request):
-        # TODO deduplicate
-        if await self.scheduler.enqueue_request(request):
-            asyncio.create_task(self.crawler.subscriber.notify(request_scheduled, request, self.crawler.spider))
-
-    async def _get_next_request(self):
-        return await self.scheduler.next_request()
-
-    async def _handle_spider_output(self, outputs):
-        async for spider_output in outputs:
-            if isinstance(spider_output, (Request, Item)):
-                await self.processor.enqueue(spider_output)
-            elif isinstance(spider_output, Exception):
-                asyncio.create_task(
-                    self.crawler.subscriber.notify(spider_error, spider_output, self.spider)
-                )
-                raise spider_output
-            else:
-                raise OutputError(f'{type(self.spider)} must return `Request` or `Item`.')
-
-    async def _exit(self):
-        if self.scheduler.idle() and self.downloader.idle() and self.task_manager.all_done() and self.processor.idle():
-            return True
-        return False
-
-    async def close_spider(self):
-        await asyncio.gather(*self.task_manager.current_task)
-        await self.scheduler.close()
-        await self.downloader.close()
-        if self.normal:
+#!/usr/bin/python
+# -*- coding:UTF-8 -*-
+import asyncio
+from inspect import iscoroutine
+from typing import Optional, Generator, Callable
+
+from crawlo import Request, Item
+from crawlo.spider import Spider
+from crawlo.utils.log import get_logger
+from crawlo.exceptions import OutputError
+from crawlo.core.scheduler import Scheduler
+from crawlo.core.processor import Processor
+from crawlo.task_manager import TaskManager
+from crawlo.utils.project import load_class
+from crawlo.downloader import DownloaderBase
+from crawlo.utils.func_tools import transform
+from crawlo.event import spider_opened, spider_error, request_scheduled
+
+
+class Engine(object):
+
+    def __init__(self, crawler):
+        self.running = False
+        self.normal = True
+        self.crawler = crawler
+        self.settings = crawler.settings
+        self.spider: Optional[Spider] = None
+        self.downloader: Optional[DownloaderBase] = None
+        self.scheduler: Optional[Scheduler] = None
+        self.processor: Optional[Processor] = None
+        self.start_requests: Optional[Generator] = None
+        self.task_manager: Optional[TaskManager] = TaskManager(self.settings.get_int('CONCURRENCY'))
+
+        self.logger = get_logger(name=self.__class__.__name__)
+
+    def _get_downloader_cls(self):
+        downloader_cls = load_class(self.settings.get('DOWNLOADER'))
+        if not issubclass(downloader_cls, DownloaderBase):
+            raise TypeError(f'Downloader {downloader_cls.__name__} is not subclass of DownloaderBase.')
+        return downloader_cls
+
+    def engine_start(self):
+        self.running = True
+        self.logger.info(
+            f"Crawlo (version {self.settings.get_int('VERSION')}) started. "
+            f"(project name : {self.settings.get('PROJECT_NAME')})"
+        )
+
+    async def start_spider(self, spider):
+        self.spider = spider
+
+        self.scheduler = Scheduler.create_instance(self.crawler)
+        if hasattr(self.scheduler, 'open'):
+            self.scheduler.open()
+
+        downloader_cls = self._get_downloader_cls()
+        self.downloader = downloader_cls(self.crawler)
+        if hasattr(self.downloader, 'open'):
+            self.downloader.open()
+
+        self.processor = Processor(self.crawler)
+        if hasattr(self.processor, 'open'):
+            self.processor.open()
+
+        self.start_requests = iter(spider.start_requests())
+        await self._open_spider()
+
+    async def crawl(self):
+        """
+        Crawl the spider
+        """
+        while self.running:
+            if request := await self._get_next_request():
+                await self._crawl(request)
+            try:
+                start_request = next(self.start_requests)
+            except StopIteration:
+                self.start_requests = None
+            except Exception as exp:
+                # 1. all dispatched requests have finished
+                # 2. the scheduler is idle
+                # 3. the downloader is idle
+                if not await self._exit():
+                    continue
+                self.running = False
+                if self.start_requests is not None:
+                    self.logger.error(f"Error while starting requests: {str(exp)}")
+            else:
+                # enqueue the request
+                await self.enqueue_request(start_request)
+
+        if not self.running:
+            await self.close_spider()
+
+    async def _open_spider(self):
+        asyncio.create_task(self.crawler.subscriber.notify(spider_opened))
+        crawling = asyncio.create_task(self.crawl())
+        await crawling
+
+    async def _crawl(self, request):
+        # TODO implement concurrency
+        async def crawl_task():
+            outputs = await self._fetch(request)
+            # TODO handle output
+            if outputs:
+                await self._handle_spider_output(outputs)
+
+        # asyncio.create_task(crawl_task())
+        self.task_manager.create_task(crawl_task())
+
+    async def _fetch(self, request):
+        async def _successful(_response):
+            callback: Callable = request.callback or self.spider.parse
+            if _outputs := callback(_response):
+                if iscoroutine(_outputs):
+                    await _outputs
+                else:
+                    return transform(_outputs, _response)
+
+        _response = await self.downloader.fetch(request)
+        if _response is None:
+            return None
+        output = await _successful(_response)
+        return output
+
+    async def enqueue_request(self, start_request):
+        await self._schedule_request(start_request)
+
+    async def _schedule_request(self, request):
+        # TODO deduplicate
+        if await self.scheduler.enqueue_request(request):
+            asyncio.create_task(self.crawler.subscriber.notify(request_scheduled, request, self.crawler.spider))
+
+    async def _get_next_request(self):
+        return await self.scheduler.next_request()
+
+    async def _handle_spider_output(self, outputs):
+        async for spider_output in outputs:
+            if isinstance(spider_output, (Request, Item)):
+                await self.processor.enqueue(spider_output)
+            elif isinstance(spider_output, Exception):
+                asyncio.create_task(
+                    self.crawler.subscriber.notify(spider_error, spider_output, self.spider)
+                )
+                raise spider_output
+            else:
+                raise OutputError(f'{type(self.spider)} must return `Request` or `Item`.')
+
+    async def _exit(self):
+        if self.scheduler.idle() and self.downloader.idle() and self.task_manager.all_done() and self.processor.idle():
+            return True
+        return False
+
+    async def close_spider(self):
+        await asyncio.gather(*self.task_manager.current_task)
+        await self.scheduler.close()
+        await self.downloader.close()
+        if self.normal:
             await self.crawler.close()
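
The lifecycle visible in this file: engine_start() flips running, start_spider() wires up the scheduler, downloader, and processor, seeds start_requests, and awaits the crawl() loop, which alternates between dequeuing scheduled requests and draining the spider's start requests until every component reports idle. A minimal driver sketch, assuming a crawler object with the settings, stats, and subscriber attributes used above is already built (in the real package, crawlo/crawler.py owns this wiring):

    # Sketch only: crawler and spider construction is normally handled by crawlo.crawler.
    import asyncio
    from crawlo.core.engine import Engine

    async def run(crawler, spider):
        engine = Engine(crawler)
        engine.engine_start()              # mark the engine as running and log the banner
        await engine.start_spider(spider)  # open components, then await the crawl() loop

    # asyncio.run(run(crawler, spider))
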
crawlo/core/processor.py CHANGED
@@ -1,40 +1,40 @@
-#!/usr/bin/python
-# -*- coding:UTF-8 -*-
-from asyncio import Queue
-from typing import Union, Optional
-
-from crawlo import Request, Item
-from crawlo.pipelines.pipeline_manager import PipelineManager
-
-
-class Processor(object):
-
-    def __init__(self, crawler):
-        self.crawler = crawler
-        self.queue: Queue = Queue()
-        self.pipelines: Optional[PipelineManager] = None
-
-    def open(self):
-        self.pipelines = PipelineManager.from_crawler(self.crawler)
-
-    async def process(self):
-        while not self.idle():
-            result = await self.queue.get()
-            if isinstance(result, Request):
-                await self.crawler.engine.enqueue_request(result)
-            else:
-                assert isinstance(result, Item)
-                await self._process_item(result)
-
-    async def _process_item(self, item):
-        await self.pipelines.process_item(item=item)
-
-    async def enqueue(self, output: Union[Request, Item]):
-        await self.queue.put(output)
-        await self.process()
-
-    def idle(self) -> bool:
-        return len(self) == 0
-
-    def __len__(self):
-        return self.queue.qsize()
+#!/usr/bin/python
+# -*- coding:UTF-8 -*-
+from asyncio import Queue
+from typing import Union, Optional
+
+from crawlo import Request, Item
+from crawlo.pipelines.pipeline_manager import PipelineManager
+
+
+class Processor(object):
+
+    def __init__(self, crawler):
+        self.crawler = crawler
+        self.queue: Queue = Queue()
+        self.pipelines: Optional[PipelineManager] = None
+
+    def open(self):
+        self.pipelines = PipelineManager.from_crawler(self.crawler)
+
+    async def process(self):
+        while not self.idle():
+            result = await self.queue.get()
+            if isinstance(result, Request):
+                await self.crawler.engine.enqueue_request(result)
+            else:
+                assert isinstance(result, Item)
+                await self._process_item(result)
+
+    async def _process_item(self, item):
+        await self.pipelines.process_item(item=item)
+
+    async def enqueue(self, output: Union[Request, Item]):
+        await self.queue.put(output)
+        await self.process()
+
+    def idle(self) -> bool:
+        return len(self) == 0
+
+    def __len__(self):
+        return self.queue.qsize()
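
The processor is the routing point for spider output: enqueue() accepts either a Request or an Item and immediately drains the queue via process(), sending requests back to the engine for scheduling and items into the pipeline chain, so backpressure is applied at the call site. A sketch of that contract; the constructor arguments for Request and Item are assumptions, not confirmed by this file:

    # Illustrative only; Request/Item signatures are assumed from their usage in this diff.
    from crawlo import Request, Item

    async def route(processor):
        # A Request re-enters Engine.enqueue_request and hence the scheduler.
        await processor.enqueue(Request(url='https://example.com'))
        # An Item flows into PipelineManager.process_item.
        await processor.enqueue(Item())
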
crawlo/core/scheduler.py CHANGED
@@ -1,59 +1,57 @@
-#!/usr/bin/python
-# -*- coding:UTF-8 -*-
-from typing import Optional, Callable
-
-from crawlo.utils.log import get_logger
-from crawlo.utils.request import set_request
-from crawlo.utils.pqueue import SpiderPriorityQueue
-from crawlo.utils.project import load_class, common_call
-
-
-class Scheduler:
-    def __init__(self, crawler, dupe_filter, stats, log_level, priority):
-        self.crawler = crawler
-        self.request_queue: Optional[SpiderPriorityQueue] = None
-
-        # self.item_count = 0
-        # self.response_count = 0
-        self.logger = get_logger(name=self.__class__.__name__, level=log_level)
-        self.stats = stats
-        self.dupe_filter = dupe_filter
-        self.priority = priority
-
-    @classmethod
-    def create_instance(cls, crawler):
-        filter_cls = load_class(crawler.settings.get('FILTER_CLASS'))
-        o = cls(
-            crawler=crawler,
-            dupe_filter=filter_cls.create_instance(crawler),
-            stats=crawler.stats,
-            log_level=crawler.settings.get('LOG_LEVEL'),
-            priority=crawler.settings.get('DEPTH_PRIORITY')
-        )
-        return o
-
-    def open(self):
-        self.request_queue = SpiderPriorityQueue()
-        self.logger.info(f'requesting filter: {self.dupe_filter}')
-
-    async def next_request(self):
-        request = await self.request_queue.get()
-        return request
-
-    async def enqueue_request(self, request):
-        if not request.dont_filter and await common_call(self.dupe_filter.requested, request):
-            self.dupe_filter.log_stats(request)
-            return False
-        set_request(request, self.priority)
-        await self.request_queue.put(request)
-        return True
-
-    def idle(self) -> bool:
-        return len(self) == 0
-
-    async def close(self):
-        if isinstance(closed := getattr(self.dupe_filter, 'closed', None), Callable):
-            await closed()
-
-    def __len__(self):
-        return self.request_queue.qsize()
+#!/usr/bin/python
+# -*- coding:UTF-8 -*-
+from typing import Optional, Callable
+
+from crawlo.utils.log import get_logger
+from crawlo.utils.request import set_request
+from crawlo.utils.pqueue import SpiderPriorityQueue
+from crawlo.utils.project import load_class, common_call
+
+
+class Scheduler:
+    def __init__(self, crawler, dupe_filter, stats, log_level, priority):
+        self.crawler = crawler
+        self.request_queue: Optional[SpiderPriorityQueue] = None
+
+        self.logger = get_logger(name=self.__class__.__name__, level=log_level)
+        self.stats = stats
+        self.dupe_filter = dupe_filter
+        self.priority = priority
+
+    @classmethod
+    def create_instance(cls, crawler):
+        filter_cls = load_class(crawler.settings.get('FILTER_CLASS'))
+        o = cls(
+            crawler=crawler,
+            dupe_filter=filter_cls.create_instance(crawler),
+            stats=crawler.stats,
+            log_level=crawler.settings.get('LOG_LEVEL'),
+            priority=crawler.settings.get('DEPTH_PRIORITY')
+        )
+        return o
+
+    def open(self):
+        self.request_queue = SpiderPriorityQueue()
+        self.logger.info(f'requesting filter: {self.dupe_filter}')
+
+    async def next_request(self):
+        request = await self.request_queue.get()
+        return request
+
+    async def enqueue_request(self, request):
+        if not request.dont_filter and await common_call(self.dupe_filter.requested, request):
+            self.dupe_filter.log_stats(request)
+            return False
+        set_request(request, self.priority)
+        await self.request_queue.put(request)
+        return True
+
+    def idle(self) -> bool:
+        return len(self) == 0
+
+    async def close(self):
+        if isinstance(closed := getattr(self.dupe_filter, 'closed', None), Callable):
+            await closed()
+
+    def __len__(self):
+        return self.request_queue.qsize()
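
enqueue_request() encodes the scheduler's dedup contract: unless a request sets dont_filter, it is checked against the configured FILTER_CLASS; duplicates are logged and rejected with False, while fresh requests are prioritized via set_request() and queued, returning True. A sketch of that behavior, assuming the Request keyword arguments shown and a dupe filter that flags repeat fingerprints:

    # Assumed Request kwargs; return values follow enqueue_request() above.
    from crawlo import Request

    async def schedule(scheduler):
        assert await scheduler.enqueue_request(Request(url='https://example.com'))      # queued
        assert not await scheduler.enqueue_request(Request(url='https://example.com'))  # duplicate, filtered
        assert await scheduler.enqueue_request(
            Request(url='https://example.com', dont_filter=True)                        # dedup bypassed
        )
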