aio-scrapy 2.1.3__py3-none-any.whl → 2.1.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {aio_scrapy-2.1.3.dist-info → aio_scrapy-2.1.6.dist-info}/LICENSE +1 -1
- {aio_scrapy-2.1.3.dist-info → aio_scrapy-2.1.6.dist-info}/METADATA +53 -40
- aio_scrapy-2.1.6.dist-info/RECORD +134 -0
- {aio_scrapy-2.1.3.dist-info → aio_scrapy-2.1.6.dist-info}/WHEEL +1 -1
- aioscrapy/VERSION +1 -1
- aioscrapy/cmdline.py +438 -5
- aioscrapy/core/downloader/__init__.py +523 -18
- aioscrapy/core/downloader/handlers/__init__.py +188 -6
- aioscrapy/core/downloader/handlers/aiohttp.py +188 -4
- aioscrapy/core/downloader/handlers/curl_cffi.py +125 -4
- aioscrapy/core/downloader/handlers/httpx.py +134 -4
- aioscrapy/core/downloader/handlers/pyhttpx.py +133 -4
- aioscrapy/core/downloader/handlers/requests.py +121 -3
- aioscrapy/core/downloader/handlers/webdriver/__init__.py +2 -0
- aioscrapy/core/downloader/handlers/webdriver/drissionpage.py +493 -0
- aioscrapy/core/downloader/handlers/webdriver/driverpool.py +234 -0
- aioscrapy/core/downloader/handlers/webdriver/playwright.py +498 -0
- aioscrapy/core/engine.py +381 -20
- aioscrapy/core/scheduler.py +350 -36
- aioscrapy/core/scraper.py +509 -33
- aioscrapy/crawler.py +392 -10
- aioscrapy/db/__init__.py +149 -0
- aioscrapy/db/absmanager.py +212 -6
- aioscrapy/db/aiomongo.py +292 -10
- aioscrapy/db/aiomysql.py +363 -10
- aioscrapy/db/aiopg.py +299 -2
- aioscrapy/db/aiorabbitmq.py +444 -4
- aioscrapy/db/aioredis.py +260 -11
- aioscrapy/dupefilters/__init__.py +110 -5
- aioscrapy/dupefilters/disk.py +124 -2
- aioscrapy/dupefilters/redis.py +598 -32
- aioscrapy/exceptions.py +170 -14
- aioscrapy/http/__init__.py +1 -1
- aioscrapy/http/headers.py +237 -3
- aioscrapy/http/request/__init__.py +257 -11
- aioscrapy/http/request/form.py +83 -3
- aioscrapy/http/request/json_request.py +121 -9
- aioscrapy/http/response/__init__.py +306 -33
- aioscrapy/http/response/html.py +42 -3
- aioscrapy/http/response/text.py +496 -49
- aioscrapy/http/response/web_driver.py +144 -0
- aioscrapy/http/response/xml.py +45 -3
- aioscrapy/libs/downloader/defaultheaders.py +66 -2
- aioscrapy/libs/downloader/downloadtimeout.py +91 -2
- aioscrapy/libs/downloader/ja3fingerprint.py +95 -2
- aioscrapy/libs/downloader/retry.py +193 -7
- aioscrapy/libs/downloader/stats.py +142 -0
- aioscrapy/libs/downloader/useragent.py +93 -2
- aioscrapy/libs/extensions/closespider.py +166 -4
- aioscrapy/libs/extensions/corestats.py +151 -1
- aioscrapy/libs/extensions/logstats.py +145 -1
- aioscrapy/libs/extensions/metric.py +370 -1
- aioscrapy/libs/extensions/throttle.py +235 -1
- aioscrapy/libs/pipelines/__init__.py +345 -2
- aioscrapy/libs/pipelines/csv.py +242 -0
- aioscrapy/libs/pipelines/excel.py +545 -0
- aioscrapy/libs/pipelines/mongo.py +132 -0
- aioscrapy/libs/pipelines/mysql.py +67 -0
- aioscrapy/libs/pipelines/pg.py +67 -0
- aioscrapy/libs/spider/depth.py +141 -3
- aioscrapy/libs/spider/httperror.py +144 -4
- aioscrapy/libs/spider/offsite.py +202 -2
- aioscrapy/libs/spider/referer.py +396 -21
- aioscrapy/libs/spider/urllength.py +97 -1
- aioscrapy/link.py +115 -8
- aioscrapy/logformatter.py +199 -8
- aioscrapy/middleware/absmanager.py +328 -2
- aioscrapy/middleware/downloader.py +218 -0
- aioscrapy/middleware/extension.py +50 -1
- aioscrapy/middleware/itempipeline.py +96 -0
- aioscrapy/middleware/spider.py +360 -7
- aioscrapy/process.py +200 -0
- aioscrapy/proxy/__init__.py +142 -3
- aioscrapy/proxy/redis.py +136 -2
- aioscrapy/queue/__init__.py +168 -16
- aioscrapy/scrapyd/runner.py +124 -3
- aioscrapy/serializer.py +182 -2
- aioscrapy/settings/__init__.py +610 -128
- aioscrapy/settings/default_settings.py +313 -13
- aioscrapy/signalmanager.py +151 -20
- aioscrapy/signals.py +183 -1
- aioscrapy/spiderloader.py +165 -12
- aioscrapy/spiders/__init__.py +233 -6
- aioscrapy/statscollectors.py +312 -1
- aioscrapy/utils/conf.py +345 -17
- aioscrapy/utils/curl.py +168 -16
- aioscrapy/utils/decorators.py +76 -6
- aioscrapy/utils/deprecate.py +212 -19
- aioscrapy/utils/httpobj.py +55 -3
- aioscrapy/utils/log.py +79 -0
- aioscrapy/utils/misc.py +189 -21
- aioscrapy/utils/ossignal.py +67 -5
- aioscrapy/utils/project.py +165 -3
- aioscrapy/utils/python.py +254 -44
- aioscrapy/utils/reqser.py +75 -1
- aioscrapy/utils/request.py +173 -12
- aioscrapy/utils/response.py +91 -6
- aioscrapy/utils/signal.py +196 -14
- aioscrapy/utils/spider.py +51 -4
- aioscrapy/utils/template.py +93 -6
- aioscrapy/utils/tools.py +191 -17
- aioscrapy/utils/trackref.py +198 -12
- aioscrapy/utils/url.py +341 -36
- aio_scrapy-2.1.3.dist-info/RECORD +0 -133
- aioscrapy/core/downloader/handlers/playwright/__init__.py +0 -110
- aioscrapy/core/downloader/handlers/playwright/driverpool.py +0 -53
- aioscrapy/core/downloader/handlers/playwright/webdriver.py +0 -96
- aioscrapy/http/response/playwright.py +0 -36
- aioscrapy/libs/pipelines/execl.py +0 -169
- {aio_scrapy-2.1.3.dist-info → aio_scrapy-2.1.6.dist-info}/entry_points.txt +0 -0
- {aio_scrapy-2.1.3.dist-info → aio_scrapy-2.1.6.dist-info}/top_level.txt +0 -0
aioscrapy/utils/url.py
CHANGED
@@ -1,9 +1,17 @@
 """
+URL utility functions for aioscrapy.
+aioscrapy的URL实用函数。
+
 This module contains general purpose URL functions not found in the standard
-library.
+library. It provides utilities for URL parsing, manipulation, and validation
+specific to web crawling needs.
+此模块包含标准库中没有的通用URL函数。
+它提供了特定于网络爬取需求的URL解析、操作和验证实用工具。

 Some of the functions that used to be imported from this module have been moved
 to the w3lib.url module. Always import those from there instead.
+以前从此模块导入的一些函数已移至w3lib.url模块。
+始终从那里导入这些函数。
 """
 import posixpath
 import re
@@ -11,72 +19,242 @@ from urllib.parse import ParseResult, urldefrag, urlparse, urlunparse

 # scrapy.utils.url was moved to w3lib.url and import * ensures this
 # move doesn't break old code
-from w3lib.url import *
+from w3lib.url import *  # This imports functions like any_to_uri, add_or_replace_parameter, etc.
 from w3lib.url import _safe_chars, _unquotepath  # noqa: F401
 from aioscrapy.utils.python import to_unicode


 def url_is_from_any_domain(url, domains):
-    """
+    """
+    Check if a URL belongs to any of the given domains.
+    检查URL是否属于给定域名中的任何一个。
+
+    This function checks if the host part of the URL exactly matches any of the
+    given domains, or if it is a subdomain of any of them. The comparison is
+    case-insensitive.
+    此函数检查URL的主机部分是否与给定域名中的任何一个完全匹配,
+    或者它是否是其中任何一个的子域。比较不区分大小写。
+
+    Args:
+        url: The URL to check. Can be a string or a ParseResult object.
+            要检查的URL。可以是字符串或ParseResult对象。
+        domains: A list of domain names to check against.
+            要检查的域名列表。
+
+    Returns:
+        bool: True if the URL belongs to any of the given domains, False otherwise.
+            如果URL属于给定域名中的任何一个,则为True,否则为False。
+
+    Examples:
+        >>> url_is_from_any_domain("http://www.example.com/some/page.html", ["example.com"])
+        True
+        >>> url_is_from_any_domain("http://sub.example.com/", ["example.com"])
+        True
+        >>> url_is_from_any_domain("http://example.org/", ["example.com"])
+        False
+    """
+    # Get the host part of the URL and convert to lowercase
+    # 获取URL的主机部分并转换为小写
     host = parse_url(url).netloc.lower()
+
+    # If there's no host, it's not from any domain
+    # 如果没有主机,则不属于任何域名
     if not host:
         return False
+
+    # Convert all domains to lowercase for case-insensitive comparison
+    # 将所有域名转换为小写以进行不区分大小写的比较
     domains = [d.lower() for d in domains]
+
+    # Check if the host exactly matches any domain or is a subdomain of any domain
+    # 检查主机是否与任何域名完全匹配或是任何域名的子域
     return any((host == d) or (host.endswith(f'.{d}')) for d in domains)


 def url_is_from_spider(url, spider):
-    """
+    """
+    Check if a URL belongs to the given spider.
+    检查URL是否属于给定的爬虫。
+
+    This function checks if the URL belongs to the domains that the spider
+    is allowed to crawl. It considers both the spider's name and its
+    'allowed_domains' attribute (if it exists).
+    此函数检查URL是否属于爬虫允许爬取的域名。
+    它同时考虑爬虫的名称和其'allowed_domains'属性(如果存在)。
+
+    Args:
+        url: The URL to check. Can be a string or a ParseResult object.
+            要检查的URL。可以是字符串或ParseResult对象。
+        spider: The spider object to check against.
+            要检查的爬虫对象。
+
+    Returns:
+        bool: True if the URL belongs to the spider's domains, False otherwise.
+            如果URL属于爬虫的域名,则为True,否则为False。
+    """
+    # Check if the URL belongs to either the spider's name or any of its allowed domains
+    # 检查URL是否属于爬虫的名称或其任何允许的域名
     return url_is_from_any_domain(url, [spider.name] + list(getattr(spider, 'allowed_domains', [])))


 def url_has_any_extension(url, extensions):
+    """
+    Check if a URL has any of the given extensions.
+    检查URL是否具有给定扩展名中的任何一个。
+
+    This function extracts the file extension from the URL path and checks
+    if it matches any of the provided extensions. The comparison is case-insensitive.
+    此函数从URL路径中提取文件扩展名,并检查它是否与提供的任何扩展名匹配。
+    比较不区分大小写。
+
+    Args:
+        url: The URL to check. Can be a string or a ParseResult object.
+            要检查的URL。可以是字符串或ParseResult对象。
+        extensions: A list of file extensions to check against (including the dot).
+            要检查的文件扩展名列表(包括点)。
+
+    Returns:
+        bool: True if the URL has any of the given extensions, False otherwise.
+            如果URL具有给定扩展名中的任何一个,则为True,否则为False。
+
+    Examples:
+        >>> url_has_any_extension("http://example.com/file.pdf", ['.pdf', '.doc'])
+        True
+        >>> url_has_any_extension("http://example.com/file.PDF", ['.pdf'])
+        True
+        >>> url_has_any_extension("http://example.com/file.txt", ['.pdf', '.doc'])
+        False
+    """
+    # Extract the file extension from the URL path and check if it's in the list
+    # 从URL路径中提取文件扩展名,并检查它是否在列表中
     return posixpath.splitext(parse_url(url).path)[1].lower() in extensions


 def parse_url(url, encoding=None):
-    """Return urlparsed url from the given argument (which could be an already
-    parsed url)
     """
+    Parse a URL into its components.
+    将URL解析为其组成部分。
+
+    This function parses a URL into its components using urllib.parse.urlparse.
+    If the input is already a ParseResult object, it is returned unchanged.
+    If the input is a string or bytes, it is first converted to unicode.
+    此函数使用urllib.parse.urlparse将URL解析为其组成部分。
+    如果输入已经是ParseResult对象,则原样返回。
+    如果输入是字符串或字节,则首先将其转换为unicode。
+
+    Args:
+        url: The URL to parse. Can be a string, bytes, or ParseResult object.
+            要解析的URL。可以是字符串、字节或ParseResult对象。
+        encoding: The encoding to use for decoding bytes. Defaults to 'utf-8'.
+            用于解码字节的编码。默认为'utf-8'。
+
+    Returns:
+        ParseResult: A named tuple with URL components: scheme, netloc, path,
+            params, query, and fragment.
+            包含URL组件的命名元组:scheme、netloc、path、
+            params、query和fragment。
+    """
+    # If the URL is already parsed, return it as is
+    # 如果URL已经解析,则原样返回
     if isinstance(url, ParseResult):
         return url
+    # Otherwise, convert to unicode and parse
+    # 否则,转换为unicode并解析
     return urlparse(to_unicode(url, encoding))


 def escape_ajax(url):
     """
-
-
+    Convert AJAX URLs to crawlable URLs according to Google's specification.
+    根据Google的规范将AJAX URL转换为可爬取的URL。

-
-
-
-
-
-
-    >>> escape_ajax("www.example.com/ajax.html#!")
-    'www.example.com/ajax.html?_escaped_fragment_='
+    This function implements Google's "AJAX crawling scheme" which allows
+    search engines to crawl AJAX-based pages. It converts fragment identifiers
+    that start with an exclamation mark (!) to query parameters with the
+    "_escaped_fragment_" key.
+    此函数实现了Google的"AJAX爬取方案",该方案允许搜索引擎爬取基于AJAX的页面。
+    它将以感叹号(!)开头的片段标识符转换为带有"_escaped_fragment_"键的查询参数。

-
+    See: https://developers.google.com/webmasters/ajax-crawling/docs/getting-started

-
-
-
-
-
-
+    Args:
+        url: The URL to convert.
+            要转换的URL。
+
+    Returns:
+        str: The crawlable URL with _escaped_fragment_ parameter if the URL
+            contains an AJAX fragment, or the original URL otherwise.
+            如果URL包含AJAX片段,则返回带有_escaped_fragment_参数的可爬取URL,
+            否则返回原始URL。
+
+    Examples:
+        >>> escape_ajax("www.example.com/ajax.html#!key=value")
+        'www.example.com/ajax.html?_escaped_fragment_=key%3Dvalue'
+        >>> escape_ajax("www.example.com/ajax.html?k1=v1&k2=v2#!key=value")
+        'www.example.com/ajax.html?k1=v1&k2=v2&_escaped_fragment_=key%3Dvalue'
+        >>> escape_ajax("www.example.com/ajax.html?#!key=value")
+        'www.example.com/ajax.html?_escaped_fragment_=key%3Dvalue'
+        >>> escape_ajax("www.example.com/ajax.html#!")
+        'www.example.com/ajax.html?_escaped_fragment_='
+
+        URLs that are not "AJAX crawlable" (according to Google) returned as-is:
+
+        >>> escape_ajax("www.example.com/ajax.html#key=value")
+        'www.example.com/ajax.html#key=value'
+        >>> escape_ajax("www.example.com/ajax.html#")
+        'www.example.com/ajax.html#'
+        >>> escape_ajax("www.example.com/ajax.html")
+        'www.example.com/ajax.html'
     """
+    # Split the URL into the part before the fragment and the fragment itself
+    # 将URL拆分为片段之前的部分和片段本身
     defrag, frag = urldefrag(url)
+
+    # If the fragment doesn't start with '!', it's not an AJAX URL
+    # 如果片段不以'!'开头,则它不是AJAX URL
     if not frag.startswith('!'):
         return url
+
+    # Convert the AJAX URL to a crawlable URL by adding the _escaped_fragment_ parameter
+    # 通过添加_escaped_fragment_参数将AJAX URL转换为可爬取的URL
     return add_or_replace_parameter(defrag, '_escaped_fragment_', frag[1:])


 def add_http_if_no_scheme(url):
-    """
+    """
+    Add http as the default scheme if it is missing from the URL.
+    如果URL中缺少协议,则添加http作为默认协议。
+
+    This function checks if the URL already has a scheme (like http://, https://, ftp://).
+    If not, it adds 'http:' or 'http://' depending on whether the URL already has a netloc.
+    此函数检查URL是否已有协议(如http://、https://、ftp://)。
+    如果没有,它会添加'http:'或'http://',具体取决于URL是否已有网络位置。
+
+    Args:
+        url: The URL to check and possibly modify.
+            要检查并可能修改的URL。
+
+    Returns:
+        str: The URL with a scheme, either the original one or with 'http' added.
+            带有协议的URL,可能是原始协议或添加了'http'。
+
+    Examples:
+        >>> add_http_if_no_scheme("example.com")
+        'http://example.com'
+        >>> add_http_if_no_scheme("http://example.com")
+        'http://example.com'
+        >>> add_http_if_no_scheme("https://example.com")
+        'https://example.com'
+    """
+    # Check if the URL already has a scheme
+    # 检查URL是否已有协议
     match = re.match(r"^\w+://", url, flags=re.I)
     if not match:
+        # Parse the URL to determine if it has a netloc
+        # 解析URL以确定它是否有网络位置
         parts = urlparse(url)
+        # Add the appropriate http scheme
+        # 添加适当的http协议
         scheme = "http:" if parts.netloc else "http://"
         url = scheme + url

@@ -84,6 +262,24 @@ def add_http_if_no_scheme(url):


 def _is_posix_path(string):
+    """
+    Check if a string looks like a POSIX filesystem path.
+    检查字符串是否看起来像POSIX文件系统路径。
+
+    This function uses a regular expression to check if the string matches
+    common patterns for POSIX filesystem paths, such as absolute paths,
+    relative paths, and paths with home directory references.
+    此函数使用正则表达式检查字符串是否匹配POSIX文件系统路径的常见模式,
+    如绝对路径、相对路径和带有主目录引用的路径。
+
+    Args:
+        string: The string to check.
+            要检查的字符串。
+
+    Returns:
+        bool: True if the string looks like a POSIX path, False otherwise.
+            如果字符串看起来像POSIX路径,则为True,否则为False。
+    """
     return bool(
         re.match(
             r'''
@@ -106,13 +302,31 @@ def _is_posix_path(string):


 def _is_windows_path(string):
+    """
+    Check if a string looks like a Windows filesystem path.
+    检查字符串是否看起来像Windows文件系统路径。
+
+    This function uses a regular expression to check if the string matches
+    common patterns for Windows filesystem paths, such as drive letters (C:\)
+    or UNC paths (\\server\share).
+    此函数使用正则表达式检查字符串是否匹配Windows文件系统路径的常见模式,
+    如驱动器号(C:\)或UNC路径(\\server\share)。
+
+    Args:
+        string: The string to check.
+            要检查的字符串。
+
+    Returns:
+        bool: True if the string looks like a Windows path, False otherwise.
+            如果字符串看起来像Windows路径,则为True,否则为False。
+    """
     return bool(
         re.match(
             r'''
             ^
             (
-                [a-z]:\\
-                | \\\\
+                [a-z]:\\  # Drive letter followed by :\
+                | \\\\    # Or UNC path starting with \\
             )
             ''',
             string,
@@ -122,38 +336,129 @@ def _is_windows_path(string):


 def _is_filesystem_path(string):
+    """
+    Check if a string looks like a filesystem path (either POSIX or Windows).
+    检查字符串是否看起来像文件系统路径(POSIX或Windows)。
+
+    This function combines the checks for both POSIX and Windows paths.
+    此函数结合了对POSIX和Windows路径的检查。
+
+    Args:
+        string: The string to check.
+            要检查的字符串。
+
+    Returns:
+        bool: True if the string looks like a filesystem path, False otherwise.
+            如果字符串看起来像文件系统路径,则为True,否则为False。
+    """
     return _is_posix_path(string) or _is_windows_path(string)


 def guess_scheme(url):
-    """
-
+    """
+    Add an appropriate URL scheme if missing from the input.
+    如果输入中缺少适当的URL协议,则添加它。
+
+    This function examines the input and adds an appropriate scheme:
+    - 'file://' for filesystem paths (both POSIX and Windows)
+    - 'http://' for other inputs that look like URLs
+    此函数检查输入并添加适当的协议:
+    - 对于文件系统路径(POSIX和Windows),添加'file://'
+    - 对于看起来像URL的其他输入,添加'http://'
+
+    Args:
+        url: The URL or path to process.
+            要处理的URL或路径。
+
+    Returns:
+        str: The URL with an appropriate scheme added if it was missing.
+            添加了适当协议(如果缺少)的URL。
+
+    Note:
+        This function uses any_to_uri() from w3lib.url to convert filesystem
+        paths to proper file:// URLs.
+        此函数使用w3lib.url中的any_to_uri()将文件系统路径转换为适当的file://URL。
+    """
+    # If it looks like a filesystem path, convert it to a file:// URL
+    # 如果它看起来像文件系统路径,将其转换为file://URL
     if _is_filesystem_path(url):
         return any_to_uri(url)
+    # Otherwise, add http:// if needed
+    # 否则,如果需要,添加http://
     return add_http_if_no_scheme(url)


 def strip_url(url, strip_credentials=True, strip_default_port=True, origin_only=False, strip_fragment=True):
+    """
+    Strip a URL string of some of its components.
+    从URL字符串中去除某些组件。

-
+    This function allows selectively removing parts of a URL, such as credentials,
+    default ports, paths, queries, and fragments. It's useful for normalizing URLs
+    or removing sensitive information.
+    此函数允许选择性地移除URL的部分内容,如凭据、默认端口、路径、查询和片段。
+    它对于规范化URL或移除敏感信息很有用。

-
-
-
-
-
-
-
+    Args:
+        url: The URL to strip.
+            要处理的URL。
+        strip_credentials: Whether to remove "user:password@" from the URL.
+            是否从URL中移除"user:password@"。
+            Defaults to True.
+            默认为True。
+        strip_default_port: Whether to remove default ports (":80" for http,
+            ":443" for https, ":21" for ftp) from the URL.
+            是否从URL中移除默认端口(http的":80",
+            https的":443",ftp的":21")。
+            Defaults to True.
+            默认为True。
+        origin_only: Whether to keep only the origin part of the URL (scheme and netloc),
+            replacing the path with "/" and removing params, query, and fragment.
+            是否只保留URL的源部分(协议和网络位置),
+            将路径替换为"/"并移除参数、查询和片段。
+            This also implies strip_credentials=True.
+            这也意味着strip_credentials=True。
+            Defaults to False.
+            默认为False。
+        strip_fragment: Whether to remove any #fragment component from the URL.
+            是否从URL中移除任何#片段组件。
+            Defaults to True.
+            默认为True。
+
+    Returns:
+        str: The stripped URL.
+            处理后的URL。

+    Examples:
+        >>> strip_url("http://user:pass@example.com:80/path?query#fragment")
+        'http://example.com/path?query'
+        >>> strip_url("http://user:pass@example.com:80/path?query#fragment",
+        ...           strip_credentials=False, strip_fragment=False)
+        'http://user:pass@example.com/path?query#fragment'
+        >>> strip_url("http://user:pass@example.com:80/path?query#fragment",
+        ...           origin_only=True)
+        'http://example.com/'
+    """
+    # Parse the URL into its components
+    # 将URL解析为其组件
     parsed_url = urlparse(url)
     netloc = parsed_url.netloc
+
+    # Remove credentials if requested or if origin_only is True
+    # 如果请求或如果origin_only为True,则移除凭据
     if (strip_credentials or origin_only) and (parsed_url.username or parsed_url.password):
         netloc = netloc.split('@')[-1]
+
+    # Remove default ports if requested
+    # 如果请求,则移除默认端口
     if strip_default_port and parsed_url.port:
         if (parsed_url.scheme, parsed_url.port) in (('http', 80),
                                                     ('https', 443),
                                                     ('ftp', 21)):
             netloc = netloc.replace(f':{parsed_url.port}', '')
+
+    # Reconstruct the URL with the desired components
+    # 使用所需组件重建URL
     return urlunparse((
         parsed_url.scheme,
         netloc,
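The doctest examples in the docstrings added above translate directly into runnable checks. A minimal sketch, assuming aioscrapy 2.1.6 is installed and importing the helpers from aioscrapy.utils.url, where the module shown above defines them:

from aioscrapy.utils.url import (
    add_http_if_no_scheme,
    strip_url,
    url_has_any_extension,
    url_is_from_any_domain,
)

# Domain membership is case-insensitive and includes subdomains.
assert url_is_from_any_domain("http://sub.example.com/", ["example.com"])
assert not url_is_from_any_domain("http://example.org/", ["example.com"])

# Extension matching lower-cases the path suffix before comparing.
assert url_has_any_extension("http://example.com/file.PDF", [".pdf"])

# A missing scheme defaults to http.
assert add_http_if_no_scheme("example.com") == "http://example.com"

# By default, credentials, default ports and fragments are stripped; the query is kept.
assert (strip_url("http://user:pass@example.com:80/path?query#fragment")
        == "http://example.com/path?query")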
aio_scrapy-2.1.3.dist-info/RECORD
DELETED
@@ -1,133 +0,0 @@
The old wheel's RECORD manifest was removed: 133 machine-generated entries, one per installed file with its sha256 digest and size, from aioscrapy/VERSION through aio_scrapy-2.1.3.dist-info/RECORD itself. It is superseded by the new aio_scrapy-2.1.6.dist-info/RECORD listed in the summary above.