crawlo 1.0.4__py3-none-any.whl → 1.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (95)
  1. crawlo/__init__.py +25 -9
  2. crawlo/__version__.py +1 -1
  3. crawlo/core/__init__.py +2 -2
  4. crawlo/core/engine.py +158 -158
  5. crawlo/core/processor.py +40 -40
  6. crawlo/core/scheduler.py +57 -57
  7. crawlo/crawler.py +424 -242
  8. crawlo/downloader/__init__.py +78 -78
  9. crawlo/downloader/aiohttp_downloader.py +200 -259
  10. crawlo/downloader/cffi_downloader.py +277 -0
  11. crawlo/downloader/httpx_downloader.py +246 -187
  12. crawlo/event.py +11 -11
  13. crawlo/exceptions.py +73 -64
  14. crawlo/extension/__init__.py +31 -31
  15. crawlo/extension/log_interval.py +49 -49
  16. crawlo/extension/log_stats.py +44 -44
  17. crawlo/extension/logging_extension.py +35 -0
  18. crawlo/filters/__init__.py +37 -37
  19. crawlo/filters/aioredis_filter.py +150 -150
  20. crawlo/filters/memory_filter.py +202 -202
  21. crawlo/items/__init__.py +62 -62
  22. crawlo/items/items.py +115 -119
  23. crawlo/middleware/__init__.py +21 -21
  24. crawlo/middleware/default_header.py +32 -32
  25. crawlo/middleware/download_delay.py +28 -28
  26. crawlo/middleware/middleware_manager.py +135 -140
  27. crawlo/middleware/proxy.py +246 -0
  28. crawlo/middleware/request_ignore.py +30 -30
  29. crawlo/middleware/response_code.py +18 -18
  30. crawlo/middleware/response_filter.py +26 -26
  31. crawlo/middleware/retry.py +90 -90
  32. crawlo/network/__init__.py +7 -7
  33. crawlo/network/request.py +203 -204
  34. crawlo/network/response.py +166 -166
  35. crawlo/pipelines/__init__.py +13 -13
  36. crawlo/pipelines/console_pipeline.py +39 -39
  37. crawlo/pipelines/mongo_pipeline.py +116 -116
  38. crawlo/pipelines/mysql_batch_pipline.py +273 -134
  39. crawlo/pipelines/mysql_pipeline.py +195 -195
  40. crawlo/pipelines/pipeline_manager.py +56 -56
  41. crawlo/settings/__init__.py +7 -7
  42. crawlo/settings/default_settings.py +169 -94
  43. crawlo/settings/setting_manager.py +99 -99
  44. crawlo/spider/__init__.py +41 -36
  45. crawlo/stats_collector.py +59 -59
  46. crawlo/subscriber.py +106 -106
  47. crawlo/task_manager.py +27 -27
  48. crawlo/templates/item_template.tmpl +21 -21
  49. crawlo/templates/project_template/main.py +32 -32
  50. crawlo/templates/project_template/setting.py +189 -189
  51. crawlo/templates/spider_template.tmpl +30 -30
  52. crawlo/utils/__init__.py +7 -7
  53. crawlo/utils/concurrency_manager.py +124 -124
  54. crawlo/utils/date_tools.py +233 -177
  55. crawlo/utils/db_helper.py +344 -0
  56. crawlo/utils/func_tools.py +82 -82
  57. crawlo/utils/log.py +129 -39
  58. crawlo/utils/pqueue.py +173 -173
  59. crawlo/utils/project.py +59 -59
  60. crawlo/utils/request.py +267 -122
  61. crawlo/utils/system.py +11 -11
  62. crawlo/utils/tools.py +5 -303
  63. crawlo/utils/url.py +39 -39
  64. {crawlo-1.0.4.dist-info → crawlo-1.0.5.dist-info}/METADATA +49 -48
  65. crawlo-1.0.5.dist-info/RECORD +84 -0
  66. {crawlo-1.0.4.dist-info → crawlo-1.0.5.dist-info}/top_level.txt +1 -0
  67. examples/__init__.py +0 -0
  68. examples/gxb/__init__.py +0 -0
  69. examples/gxb/items.py +36 -0
  70. examples/gxb/run.py +15 -0
  71. examples/gxb/settings.py +71 -0
  72. examples/gxb/spider/__init__.py +0 -0
  73. examples/gxb/spider/miit_spider.py +180 -0
  74. examples/gxb/spider/telecom_device_licenses.py +129 -0
  75. tests/__init__.py +7 -7
  76. tests/test_proxy_health_check.py +33 -0
  77. tests/test_proxy_middleware_integration.py +137 -0
  78. tests/test_proxy_providers.py +57 -0
  79. tests/test_proxy_stats.py +20 -0
  80. tests/test_proxy_strategies.py +60 -0
  81. crawlo/downloader/playwright_downloader.py +0 -161
  82. crawlo-1.0.4.dist-info/RECORD +0 -79
  83. tests/baidu_spider/__init__.py +0 -7
  84. tests/baidu_spider/demo.py +0 -94
  85. tests/baidu_spider/items.py +0 -25
  86. tests/baidu_spider/middleware.py +0 -49
  87. tests/baidu_spider/pipeline.py +0 -55
  88. tests/baidu_spider/request_fingerprints.txt +0 -9
  89. tests/baidu_spider/run.py +0 -27
  90. tests/baidu_spider/settings.py +0 -80
  91. tests/baidu_spider/spiders/__init__.py +0 -7
  92. tests/baidu_spider/spiders/bai_du.py +0 -61
  93. tests/baidu_spider/spiders/sina.py +0 -79
  94. {crawlo-1.0.4.dist-info → crawlo-1.0.5.dist-info}/WHEEL +0 -0
  95. {crawlo-1.0.4.dist-info → crawlo-1.0.5.dist-info}/entry_points.txt +0 -0
crawlo/utils/tools.py CHANGED
@@ -1,303 +1,5 @@
- import json
- import re
- from pprint import pformat
- from datetime import date, time, datetime
-
- from crawlo.utils.log import get_logger
-
-
- logger = get_logger(__name__)
-
-
- def make_insert_sql(
-         table, data, auto_update=False, update_columns=(), insert_ignore=False
- ):
-     """
-     @summary: for MySQL
-     ---------
-     @param table:
-     @param data: table data as a dict (JSON-style)
-     @param auto_update: uses REPLACE INTO, fully overwriting existing rows
-     @param update_columns: columns to update, all by default; when given, auto_update is ignored and these columns are updated on duplicate key conflicts
-     @param insert_ignore: skip rows that already exist
-     ---------
-     @result:
-     """
-
-     keys = ["`{}`".format(key) for key in data.keys()]
-     keys = list2str(keys).replace("'", "")
-
-     values = [format_sql_value(value) for value in data.values()]
-     values = list2str(values)
-
-     if update_columns:
-         if not isinstance(update_columns, (tuple, list)):
-             update_columns = [update_columns]
-         update_columns_ = ", ".join(
-             ["{key}=values({key})".format(key=key) for key in update_columns]
-         )
-         sql = (
-             "insert%s into `{table}` {keys} values {values} on duplicate key update %s"
-             % (" ignore" if insert_ignore else "", update_columns_)
-         )
-
-     elif auto_update:
-         sql = "replace into `{table}` {keys} values {values}"
-     else:
-         sql = "insert%s into `{table}` {keys} values {values}" % (
-             " ignore" if insert_ignore else ""
-         )
-
-     sql = sql.format(table=table, keys=keys, values=values).replace("None", "null")
-     return sql
-
-
- def make_update_sql(table, data, condition):
-     """
-     @summary: for MySQL; Oracle datetime columns would need to_date handling (TODO)
-     ---------
-     @param table:
-     @param data: table data as a dict (JSON-style)
-     @param condition: WHERE clause condition
-     ---------
-     @result:
-     """
-     key_values = []
-
-     for key, value in data.items():
-         value = format_sql_value(value)
-         if isinstance(value, str):
-             key_values.append("`{}`={}".format(key, repr(value)))
-         elif value is None:
-             key_values.append("`{}`={}".format(key, "null"))
-         else:
-             key_values.append("`{}`={}".format(key, value))
-
-     key_values = ", ".join(key_values)
-
-     sql = "update `{table}` set {key_values} where {condition}"
-     sql = sql.format(table=table, key_values=key_values, condition=condition)
-     return sql
-
-
- def make_batch_sql(
-         table, datas, auto_update=False, update_columns=(), update_columns_value=()
- ):
-     """
-     @summary: build a batch SQL statement
-     ---------
-     @param table:
-     @param datas: table data as a list of dicts [{...}]
-     @param auto_update: uses REPLACE INTO, fully overwriting existing rows
-     @param update_columns: columns to update, all by default; when given, auto_update is ignored and these columns are updated on duplicate key conflicts
-     @param update_columns_value: values for the updated columns; defaults to the corresponding values in datas. Note that string values must carry their own single quotes, e.g. update_columns_value=("'test'",)
-     ---------
-     @result:
-     """
-     if not datas:
-         return
-
-     keys = list(set([key for data in datas for key in data]))
-     # values_placeholder = ["%s"] * len(keys)
-     values = []
-     for data in datas:
-         # check that data is a dict
-         if not isinstance(data, dict):
-             # if data is not a dict, log an error with its content and type
-             # logger.error(f"Expected a dict but got: {data} (type: {type(data)})")
-             continue  # skip non-dict data and move on to the next entry
-
-         value = []
-         for key in keys:
-             # get the value for the current key from the dict
-             current_data = data.get(key)
-             try:
-                 # format the value
-                 current_data = format_sql_value(current_data)
-                 value.append(current_data)  # append the formatted value to the list
-             except Exception as e:
-                 # log formatting failures
-                 logger.error(f"{key}: {current_data} (type: {type(current_data)}) -> {e}")
-
-         # append the formatted row to values
-         values.append(value)
-     keys_str = ", ".join(["`{}`".format(key) for key in keys])
-     placeholders_str = ", ".join(["%s"] * len(keys))
-
-     if update_columns:
-         if not isinstance(update_columns, (tuple, list)):
-             update_columns = [update_columns]
-         if update_columns_value:
-             update_columns_ = ", ".join(
-                 [
-                     "`{key}`={value}".format(key=key, value=value)
-                     for key, value in zip(update_columns, update_columns_value)
-                 ]
-             )
-         else:
-             # use the VALUES() function to reference the inserted values
-             update_columns_ = ", ".join(
-                 ["`{key}`=VALUES(`{key}`)".format(key=key) for key in update_columns]
-             )
-
-         sql = f"INSERT INTO `{table}` ({keys_str}) VALUES ({placeholders_str}) ON DUPLICATE KEY UPDATE {update_columns_}"
-     elif auto_update:
-         sql = "REPLACE INTO `{table}` ({keys}) VALUES ({values_placeholder})".format(
-             table=table, keys=keys_str, values_placeholder=placeholders_str
-         )
-     else:
-         sql = "INSERT IGNORE INTO `{table}` ({keys}) VALUES ({values_placeholder})".format(
-             table=table, keys=keys_str, values_placeholder=placeholders_str
-         )
-     return sql, values
-
-
- def format_sql_value(value):
-     """
-     Format a value for SQL
-     """
-     if value is None:
-         return None  # handle NULL values
-
-     # strings
-     if isinstance(value, str):
-         return value.strip()  # strip leading/trailing whitespace
-
-     # lists and tuples
-     elif isinstance(value, (list, tuple)):
-         try:
-             return dumps_json(value)  # serialize to a JSON string
-         except Exception as e:
-             raise ValueError(f"Failed to serialize list/tuple to JSON: {value}, error: {e}")
-
-     # dicts
-     elif isinstance(value, dict):
-         try:
-             return dumps_json(value)  # serialize to a JSON string
-         except Exception as e:
-             raise ValueError(f"Failed to serialize dict to JSON: {value}, error: {e}")
-
-     # booleans
-     elif isinstance(value, bool):
-         return int(value)  # convert to an integer
-
-     # numeric types, matched before date/time
-     elif isinstance(value, (int, float)):
-         return value  # return the number as-is
-
-     # dates and times
-     elif isinstance(value, (date, time, datetime)):
-         return str(value)  # convert to a string representation
-
-     # raise for types that cannot be handled
-     else:
-         raise TypeError(f"Unsupported value type: {type(value)}, value: {value}")
-
-
-
-
-
- def list2str(datas):
-     """
-     Convert a list to a string
-     :param datas: [1, 2]
-     :return: (1, 2)
-     """
-     data_str = str(tuple(datas))
-     data_str = re.sub(r",\)$", ")", data_str)
-     return data_str
-
- _REGEXPS = {}
-
- def get_info(html, regexps, allow_repeat=True, fetch_one=False, split=None):
-     regexps = isinstance(regexps, str) and [regexps] or regexps
-
-     infos = []
-     for regex in regexps:
-         if regex == "":
-             continue
-
-         if regex not in _REGEXPS.keys():
-             _REGEXPS[regex] = re.compile(regex, re.S)
-
-         if fetch_one:
-             infos = _REGEXPS[regex].search(html)
-             if infos:
-                 infos = infos.groups()
-             else:
-                 continue
-         else:
-             infos = _REGEXPS[regex].findall(str(html))
-
-         if len(infos) > 0:
-             break
-
-     if fetch_one:
-         infos = infos if infos else ("",)
-         return infos if len(infos) > 1 else infos[0]
-     else:
-         infos = allow_repeat and infos or sorted(set(infos), key=infos.index)
-         infos = split.join(infos) if split else infos
-         return infos
-
-
- def get_json(json_str):
-     """
-     @summary: parse a JSON object
-     ---------
-     @param json_str: a JSON-formatted string
-     ---------
-     @result: the parsed JSON object
-     """
-
-     try:
-         return json.loads(json_str) if json_str else {}
-     except Exception as e1:
-         try:
-             json_str = json_str.strip()
-             json_str = json_str.replace("'", '"')
-             keys = get_info(json_str, r"(\w+):")
-             for key in keys:
-                 json_str = json_str.replace(key, '"%s"' % key)
-
-             return json.loads(json_str) if json_str else {}
-
-         except Exception as e2:
-             logger.error(
-                 """
-                 e1: %s
-                 format json_str: %s
-                 e2: %s
-                 """
-                 % (e1, json_str, e2)
-             )
-
-         return {}
-
-
- def dumps_json(data, indent=4, sort_keys=False):
-     """
-     @summary: pretty-format JSON for printing
-     ---------
-     @param data: a JSON-formatted string or a JSON object
-     @param indent:
-     @param sort_keys:
-     ---------
-     @result: the formatted string
-     """
-     try:
-         if isinstance(data, str):
-             data = get_json(data)
-
-         data = json.dumps(
-             data,
-             ensure_ascii=False,
-             indent=indent,
-             skipkeys=True,
-             sort_keys=sort_keys,
-             default=str,
-         )
-
-     except Exception as e:
-         data = pformat(data)
-
-     return data
+ def custom_extractor_proxy(data: dict, key: str='proxy') -> dict | str | None:
+     """Only extracts the proxy portion from the API response data."""
+     if data.get("status") == 0:
+         return data.get(key)  # return the whole {"http": "...", "https": "..."} dict
+     return None
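
The replacement module keeps only this proxy-extraction hook. A minimal usage sketch follows; the payload shape is an assumption for illustration only and is not documented in this diff:

    # Hypothetical provider response; the real shape depends on the proxy API in use.
    from crawlo.utils.tools import custom_extractor_proxy

    api_response = {
        "status": 0,
        "proxy": {"http": "http://127.0.0.1:8888", "https": "http://127.0.0.1:8888"},
    }

    print(custom_extractor_proxy(api_response))   # -> {'http': '...', 'https': '...'}
    print(custom_extractor_proxy({"status": 1}))  # non-zero status -> None
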
crawlo/utils/url.py CHANGED
@@ -1,40 +1,40 @@
- from urllib.parse import urldefrag
- from w3lib.url import add_or_replace_parameter
-
-
- def escape_ajax(url: str) -> str:
-     """
-     Convert a URL according to Google's AJAX crawling scheme (handles #! hash fragments):
-     https://developers.google.com/webmasters/ajax-crawling/docs/getting-started
-
-     Rules:
-     1. Only convert when the URL contains `#!` (marking the page as AJAX-crawlable)
-     2. Rewrite `#!key=value` to `?_escaped_fragment_=key%3Dvalue`
-     3. Preserve any existing query parameters
-
-     Examples:
-     >>> escape_ajax("www.example.com/ajax.html#!key=value")
-     'www.example.com/ajax.html?_escaped_fragment_=key%3Dvalue'
-     >>> escape_ajax("www.example.com/ajax.html?k1=v1#!key=value")
-     'www.example.com/ajax.html?k1=v1&_escaped_fragment_=key%3Dvalue'
-     >>> escape_ajax("www.example.com/ajax.html#!")
-     'www.example.com/ajax.html?_escaped_fragment_='
-
-     URLs that are not AJAX-crawlable (no #!) are returned unchanged:
-     >>> escape_ajax("www.example.com/ajax.html#normal")
-     'www.example.com/ajax.html#normal'
-     """
-     # Split the URL into its base part and the hash fragment
-     de_frag, frag = urldefrag(url)
-
-     # Only handle hash fragments that start with "!" (per the Google scheme)
-     if not frag.startswith("!"):
-         return url  # otherwise return the URL unchanged
-
-     # Add the `_escaped_fragment_` parameter via the helper function
-     return add_or_replace_parameter(de_frag, "_escaped_fragment_", frag[1:])
-
-
- if __name__ == '__main__':
-     f = escape_ajax('http://example.com/page#!')
+ from urllib.parse import urldefrag
+ from w3lib.url import add_or_replace_parameter
+
+
+ def escape_ajax(url: str) -> str:
+     """
+     Convert a URL according to Google's AJAX crawling scheme (handles #! hash fragments):
+     https://developers.google.com/webmasters/ajax-crawling/docs/getting-started
+
+     Rules:
+     1. Only convert when the URL contains `#!` (marking the page as AJAX-crawlable)
+     2. Rewrite `#!key=value` to `?_escaped_fragment_=key%3Dvalue`
+     3. Preserve any existing query parameters
+
+     Examples:
+     >>> escape_ajax("www.example.com/ajax.html#!key=value")
+     'www.example.com/ajax.html?_escaped_fragment_=key%3Dvalue'
+     >>> escape_ajax("www.example.com/ajax.html?k1=v1#!key=value")
+     'www.example.com/ajax.html?k1=v1&_escaped_fragment_=key%3Dvalue'
+     >>> escape_ajax("www.example.com/ajax.html#!")
+     'www.example.com/ajax.html?_escaped_fragment_='
+
+     URLs that are not AJAX-crawlable (no #!) are returned unchanged:
+     >>> escape_ajax("www.example.com/ajax.html#normal")
+     'www.example.com/ajax.html#normal'
+     """
+     # Split the URL into its base part and the hash fragment
+     de_frag, frag = urldefrag(url)
+
+     # Only handle hash fragments that start with "!" (per the Google scheme)
+     if not frag.startswith("!"):
+         return url  # otherwise return the URL unchanged
+
+     # Add the `_escaped_fragment_` parameter via the helper function
+     return add_or_replace_parameter(de_frag, "_escaped_fragment_", frag[1:])
+
+
+ if __name__ == '__main__':
+     f = escape_ajax('http://example.com/page#!')
  print(f)
{crawlo-1.0.4.dist-info → crawlo-1.0.5.dist-info}/METADATA CHANGED
@@ -1,48 +1,49 @@
- Metadata-Version: 2.4
- Name: crawlo
- Version: 1.0.4
- Summary: Crawlo is a high-performance Python crawler framework built on asynchronous IO, with support for distributed crawling.
- Home-page: https://github.com/crawl-coder/Crawlo.git
- Author: crawl-coder
- Author-email: crawlo@qq.com
- License: MIT
- Classifier: Programming Language :: Python :: 3
- Classifier: License :: OSI Approved :: MIT License
- Classifier: Operating System :: OS Independent
- Requires-Python: >=3.6
- Description-Content-Type: text/markdown
- Requires-Dist: aiohttp>=3.12.14
- Requires-Dist: aiomysql>=0.2.0
- Requires-Dist: aioredis>=2.0.1
- Requires-Dist: asyncmy>=0.2.10
- Requires-Dist: cssselect>=1.2.0
- Requires-Dist: dateparser>=1.2.2
- Requires-Dist: httpx[http2]>=0.27.0
- Requires-Dist: lxml>=5.2.1
- Requires-Dist: motor>=3.7.0
- Requires-Dist: parsel>=1.9.1
- Requires-Dist: pydantic>=2.11.7
- Requires-Dist: pymongo>=4.11
- Requires-Dist: PyMySQL>=1.1.1
- Requires-Dist: python-dateutil>=2.9.0.post0
- Requires-Dist: redis>=6.2.0
- Requires-Dist: requests>=2.32.4
- Requires-Dist: six>=1.17.0
- Requires-Dist: ujson>=5.9.0
- Requires-Dist: urllib3>=2.5.0
- Requires-Dist: w3lib>=2.1.2
- Provides-Extra: render
- Requires-Dist: webdriver-manager>=4.0.0; extra == "render"
- Requires-Dist: playwright; extra == "render"
- Requires-Dist: selenium>=3.141.0; extra == "render"
- Provides-Extra: all
- Requires-Dist: bitarray>=1.5.3; extra == "all"
- Requires-Dist: PyExecJS>=1.5.1; extra == "all"
- Requires-Dist: pymongo>=3.10.1; extra == "all"
- Requires-Dist: redis-py-cluster>=2.1.0; extra == "all"
- Requires-Dist: webdriver-manager>=4.0.0; extra == "all"
- Requires-Dist: playwright; extra == "all"
- Requires-Dist: selenium>=3.141.0; extra == "all"
-
- # Crawlo
- Crawlo is a high-performance Python crawler framework built on asynchronous IO, with support for distributed crawling and data pipelines.
+ Metadata-Version: 2.4
+ Name: crawlo
+ Version: 1.0.5
+ Summary: Crawlo is a high-performance Python crawler framework built on asynchronous IO, with support for distributed crawling.
+ Home-page: https://github.com/crawl-coder/Crawlo.git
+ Author: crawl-coder
+ Author-email: crawlo@qq.com
+ License: MIT
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.6
+ Description-Content-Type: text/markdown
+ Requires-Dist: aiohttp>=3.12.14
+ Requires-Dist: aiomysql>=0.2.0
+ Requires-Dist: aioredis>=2.0.1
+ Requires-Dist: asyncmy>=0.2.10
+ Requires-Dist: cssselect>=1.2.0
+ Requires-Dist: dateparser>=1.2.2
+ Requires-Dist: httpx[http2]>=0.27.0
+ Requires-Dist: curl-cffi>=0.13.0
+ Requires-Dist: lxml>=5.2.1
+ Requires-Dist: motor>=3.7.0
+ Requires-Dist: parsel>=1.9.1
+ Requires-Dist: pydantic>=2.11.7
+ Requires-Dist: pymongo>=4.11
+ Requires-Dist: PyMySQL>=1.1.1
+ Requires-Dist: python-dateutil>=2.9.0.post0
+ Requires-Dist: redis>=6.2.0
+ Requires-Dist: requests>=2.32.4
+ Requires-Dist: six>=1.17.0
+ Requires-Dist: ujson>=5.9.0
+ Requires-Dist: urllib3>=2.5.0
+ Requires-Dist: w3lib>=2.1.2
+ Provides-Extra: render
+ Requires-Dist: webdriver-manager>=4.0.0; extra == "render"
+ Requires-Dist: playwright; extra == "render"
+ Requires-Dist: selenium>=3.141.0; extra == "render"
+ Provides-Extra: all
+ Requires-Dist: bitarray>=1.5.3; extra == "all"
+ Requires-Dist: PyExecJS>=1.5.1; extra == "all"
+ Requires-Dist: pymongo>=3.10.1; extra == "all"
+ Requires-Dist: redis-py-cluster>=2.1.0; extra == "all"
+ Requires-Dist: webdriver-manager>=4.0.0; extra == "all"
+ Requires-Dist: playwright; extra == "all"
+ Requires-Dist: selenium>=3.141.0; extra == "all"
+
+ # Crawlo
+ Crawlo is a high-performance Python crawler framework built on asynchronous IO, with support for distributed crawling and data pipelines.
crawlo-1.0.5.dist-info/RECORD ADDED
@@ -0,0 +1,84 @@
+ crawlo/__init__.py,sha256=xpiIAZbSG3CzneJuDLPCbwfRcvw2wyHYl2kJjaNfNGY,584
+ crawlo/__version__.py,sha256=lfZikIZ2prlMV6RkxhMRZj5dAeD0TCswIWS46kSjXw0,23
+ crawlo/crawler.py,sha256=2izxy7-0yD8n_FsLSL_NaoFYdqQWIhm0hsoSLKgnPcA,16919
+ crawlo/event.py,sha256=ZhoPW5CglCEuZNFEwviSCBIw0pT5O6jT98bqYrDFd3E,324
+ crawlo/exceptions.py,sha256=VUFSOS00BPWMcH8EW5MgMDhXlUlaFeEcsgqbS_e8MoU,1119
+ crawlo/stats_collector.py,sha256=v4jC9BAe-23w93hWzbeMCCgQ9VuFPyxw5JV9ItbGH8w,1636
+ crawlo/subscriber.py,sha256=udlHeTR0ymGQhCDxVUGwUzeeeR4TYCEJrJwFnkgr0cU,3836
+ crawlo/task_manager.py,sha256=D9m-nqnGj-FZPtGk4CdwZX3Gw7IWyYvTS7CHpRGWc_w,748
+ crawlo/core/__init__.py,sha256=JYSAn15r8yWgRK_Nc69t_8tZCyb70MiPZKssA8wrYz0,43
+ crawlo/core/engine.py,sha256=JFHooPp-5cfHSyxEh87nOOR5NMaPLVDfNSqAsbtx4PM,6030
+ crawlo/core/processor.py,sha256=oHLs-cno0bJGTNc9NGD2S7_2-grI3ruvggO0SY2mf3Q,1180
+ crawlo/core/scheduler.py,sha256=ZMPs4LSs69FsFfDTvaOMJKqpSQQGvIEE9pMyYVVAA64,1948
+ crawlo/downloader/__init__.py,sha256=72u2Hef4HaMfs9VCqEjbMtiaRXbaXmgNiJn6qy09LHs,2384
+ crawlo/downloader/aiohttp_downloader.py,sha256=YfvYCDp3y0OsVyfdYX1XJC3EcCrbNLKOcFY8b7JC3_w,7675
+ crawlo/downloader/cffi_downloader.py,sha256=QthBmZOE0cjYNRTM-449EuaFuqxxdc19kp93iqOlwB8,12678
+ crawlo/downloader/httpx_downloader.py,sha256=yshb1JZa4B6hcVwIT97SrxCXkj3HJqT9IDpxSxjkJm4,11754
+ crawlo/extension/__init__.py,sha256=O2BVK1U3WwmurZb-PaYVz3g1tZ_iYUjCwilmUKf6844,1170
+ crawlo/extension/log_interval.py,sha256=FOWeTOuWtOpCz2UPV5F_--QIa8yomltSpjxbw3F7bkU,1971
+ crawlo/extension/log_stats.py,sha256=JFJsdK7UWkhP4TEAF-H-S7SpQbDpBryS0AT6e6jZCBo,1721
+ crawlo/extension/logging_extension.py,sha256=rty2_up53KV05nCazuBuz2ZapHKq0ti7mGVBzMTr0ak,1236
+ crawlo/filters/__init__.py,sha256=9fJQRVkxWWPChajYbAGe1O6UYB639xWt0hiLUGBs4hQ,1014
+ crawlo/filters/aioredis_filter.py,sha256=phBFW9Z28oylbik9Kb2WHM65Wo5yRAH2w9Yz0_2HaOQ,5621
+ crawlo/filters/memory_filter.py,sha256=L8XEJkObOxs4BzYpQvk9PVM969k2LE61VFsnEOTEf_E,6841
+ crawlo/items/__init__.py,sha256=o5BSpS1Byivr-bpdfFgc9GCoGi8ThNuPJiTW7lz85-I,2125
+ crawlo/items/items.py,sha256=0jf-CdZFkgDAevYn8PmSgGhf6iYu3bx1sv87hJbFtF4,3891
+ crawlo/middleware/__init__.py,sha256=PSwpRLdBUopaQzBp1S0zK_TZbrRagQ4yzvgyLy4tBk8,570
+ crawlo/middleware/default_header.py,sha256=OVW4vpRPp3Y6qYXtiEYlGqVjCYcbuv1Iecc7zEgwCsI,1099
+ crawlo/middleware/download_delay.py,sha256=P2eyAJXwdLdC4yYuLhvKZVa1b5YQvQD0GpsR8aDW8-8,994
+ crawlo/middleware/middleware_manager.py,sha256=Vfkasi8YaLxzGrOrFYfxOMEGRS8XocqeQMruLtVxL_c,6360
+ crawlo/middleware/proxy.py,sha256=PiIfhRXfcMzBtW_p7jfR8rGxcM4VT68Mk54swbaV2H4,9801
+ crawlo/middleware/request_ignore.py,sha256=jdybWFVXuA5YsAPfZJFzLTWkYhEAewNgxuhFqczPW9M,1027
+ crawlo/middleware/response_code.py,sha256=vgXWv3mMu_v9URvhKA9myIFH4u6L4EwNme80wL4DCGc,677
+ crawlo/middleware/response_filter.py,sha256=O2gkV_Yjart8kmmXTGzrtZnb_Uuefap4uL2Cu01iRs4,863
+ crawlo/middleware/retry.py,sha256=a2EmigYFzL8oxd50JhrSe5XbYJyx8yDjOjE5fXAOFhY,3459
+ crawlo/network/__init__.py,sha256=DVz1JpasjxCgOlXvm76gz-S18OXr4emG_J39yi5iVuA,130
+ crawlo/network/request.py,sha256=qd50mmrXS6yZKmAb6ERAMHzm2Ln80Wu5NSMwx_t1IGc,7247
+ crawlo/network/response.py,sha256=z2Owti_9ds567jLvfuX8hrfdQL8JKn5lkt2QOc-Gi3Y,6200
+ crawlo/pipelines/__init__.py,sha256=IbXJ6B8LqxVVjeLNgL_12AxV6zbV8hNRQxAfMLjjSaw,273
+ crawlo/pipelines/console_pipeline.py,sha256=bwe5hZgaVSWmh3R8XpOaaeAjJme-Ttrpo6G6f1cnLIg,1287
+ crawlo/pipelines/mongo_pipeline.py,sha256=Yr48D0T61-_Y-EpgWXf7BUn9w8e-Pj5P07QDSPZ0pYU,4558
+ crawlo/pipelines/mysql_batch_pipline.py,sha256=Mj3PReDRw22JhJ5hZxnka4cirKq3kEbOCNhgpq1gvfA,10611
+ crawlo/pipelines/mysql_pipeline.py,sha256=bsAFqpxrCijzvX-IusxOtvTvQEUCt5uHNTyYMo_pIq4,8056
+ crawlo/pipelines/pipeline_manager.py,sha256=k-Rg0os0Havrov99D-Jn3ROpnz154K30tf7aARE5W3k,2174
+ crawlo/settings/__init__.py,sha256=NgYFLfk_Bw7h6KSoepJn_lMBSqVbCHebjKxaE3_eMgw,130
+ crawlo/settings/default_settings.py,sha256=CH95c2oBmxy6t6bGLUuqSL8vJ3Z5Psicdfpc9W0MG90,7309
+ crawlo/settings/setting_manager.py,sha256=4xXOzKwZCgAp8ybwvVcs2R--CsOD7c6dBIkj6DJHB3c,2998
+ crawlo/spider/__init__.py,sha256=lWi0bCR7HLT5bnj7_e9UIgFJjuqoeWtbwADfNkaajug,1139
+ crawlo/templates/item_template.tmpl,sha256=0bGFnlwJRqstxMNEj1H_pEICybwoueRhs31QaDPXrS0,372
+ crawlo/templates/spider_template.tmpl,sha256=JzphuA87Yl_F1xR9zOIi_ZSazyT8eSNPxYYPMv3Uiko,835
+ crawlo/templates/project_template/main.py,sha256=BcCP294ycCPsHi_AMN7OAJtcrLvQdf91meH93PqbQgs,626
+ crawlo/templates/project_template/setting.py,sha256=Ce4nMbrdhL1ioRdTcB0vV_vK_50cfnwVqSvt49QsNkA,9395
+ crawlo/templates/project_template/items/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ crawlo/templates/project_template/spiders/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ crawlo/utils/__init__.py,sha256=XCYumI8wJ1jU_Myn_K0LT-LVygPDUCdETCbXM3EWvlo,130
+ crawlo/utils/concurrency_manager.py,sha256=o-_cfeUHdlBOM3eAXF857MtekSrRcVTBJ2jWZvY6weQ,5230
+ crawlo/utils/date_tools.py,sha256=lcEFP2Z5b-6pUTHczrzCCuqiHP_4_2zamomMGPZrExo,7194
+ crawlo/utils/db_helper.py,sha256=ZqOt1d3mErVv4TOvoWlov0niUxORB9aHByTmMoNFIDw,10902
+ crawlo/utils/func_tools.py,sha256=y-TYP9H3X67MS_foWy9Z2LIS6GP7Y4Cy3T168ulq3Jc,2451
+ crawlo/utils/log.py,sha256=YD2FfXuuE2MC9ZdQQZ0H7KysE7l_LHZqQepaTPlcApo,4133
+ crawlo/utils/pqueue.py,sha256=HDgX4HAkc7RqYUtX6q51tzI1ZRTACf8P_4jLqC4-uC0,5559
+ crawlo/utils/project.py,sha256=FfBaMfxcau4yL59O-DfD7FAii8k6gXWQmQ1YU6aaUCE,1544
+ crawlo/utils/request.py,sha256=ejdKpTwc-HE04HQybafhOVywzz57IV3pY0YMkSLyGUo,9065
+ crawlo/utils/system.py,sha256=24zGmtHNhDFMGVo7ftMV-Pqg6_5d63zsyNey9udvJJk,248
+ crawlo/utils/tools.py,sha256=uy7qw5Z1BIhyEgiHENvtM7WoGCJxlS8EX3PmOA7ouCo,275
+ crawlo/utils/url.py,sha256=RKe_iqdjafsNcp-P2GVLYpsL1qbxiuZLiFc-SqOQkcs,1521
+ examples/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ examples/gxb/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ examples/gxb/items.py,sha256=s4uKo3kKlN2DC4Y4muwp_qzG6kdyhzOVLEjXv7Qvi7c,995
+ examples/gxb/run.py,sha256=YLtlUB6GEAHLuLaTOt8HSOyAF1ZBdjSAwR9rJ2prUSs,340
+ examples/gxb/settings.py,sha256=JqwnEkZ0wZZ1f43I2Ne9yu1LnEBBiH2rVG2iDKZC1Q8,2321
+ examples/gxb/spider/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ examples/gxb/spider/miit_spider.py,sha256=tcQnuyUHfu-Re1QbKKSI9DXW3Sp1vyBW8qBzKLf_RC4,6666
+ examples/gxb/spider/telecom_device_licenses.py,sha256=t-XFai7e4itfGR4zeTJVJ1ulhfj-92gIgISqqdOwdag,4938
+ tests/__init__.py,sha256=409aRX8hsPffiZCVjOogtxwhACzBp8G2UTJyUQSxhK0,136
+ tests/test_proxy_health_check.py,sha256=_tDlxa_6TdL3M5RLkHF82roXJ8WIuG5hELBp2GADyKQ,1123
+ tests/test_proxy_middleware_integration.py,sha256=mTPK_XvbmLCV_QoVZzA3ybWOOX61493Ew78WfTp-bYQ,4441
+ tests/test_proxy_providers.py,sha256=u_R2fhab90vqvQEaOAztpAOe9tJXvUMIdoDxmStmXJ4,1749
+ tests/test_proxy_stats.py,sha256=ES00CEoDITYPFBGPk8pecFzD3ItYIv6NSpcqNd8-kvo,526
+ tests/test_proxy_strategies.py,sha256=9Z1pXmTNyw-eIhGXlf2abZbJx6igLohYq-_3hldQ5uE,1868
+ crawlo-1.0.5.dist-info/METADATA,sha256=IC9lzZIPUOEZdBXsSZkkd0CpkFuYChtuNtSasgO-O6M,1825
+ crawlo-1.0.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ crawlo-1.0.5.dist-info/entry_points.txt,sha256=GD9PBhKQN83EaxPYtz7NhcGeZeh3bdr2jWbTixOs-lw,59
+ crawlo-1.0.5.dist-info/top_level.txt,sha256=keG_67pbZ_wZL2dmDRA9RMaNHTaV_x_oxZ9DKNgwvR0,22
+ crawlo-1.0.5.dist-info/RECORD,,
{crawlo-1.0.4.dist-info → crawlo-1.0.5.dist-info}/top_level.txt CHANGED
@@ -1,2 +1,3 @@
  crawlo
+ examples
  tests
examples/__init__.py ADDED
File without changes
examples/gxb/items.py ADDED
@@ -0,0 +1,36 @@
+ from crawlo.items.items import Item, Field
+
+ class RadioApprovalItem(Item):
+     approval_number = Field()
+     device_name = Field()
+     device_model = Field()
+     applicant = Field()
+     remarks = Field()
+     validity_period = Field()
+     frequency_tolerance = Field()
+     frequency_range = Field()
+     transmit_power = Field()
+     occupied_bandwidth = Field()
+     spurious_emission_limit = Field()
+     issue_date = Field()
+     approval_code = Field()
+     cmiit_id = Field()
+     modulation_mode = Field()
+     technology_system = Field()
+     mid = Field()
+
+ class TelecomLicenseItem(Item):
+     license_number = Field()
+     device_name = Field()
+     device_model = Field()
+     applicant = Field()
+     manufacturer = Field()
+     issue_date = Field()
+     expiry_date = Field()
+     certificate_type = Field()
+     remarks = Field()
+     certificate_status = Field()
+     origin = Field()
+     article_id = Field()
+     article_edit_date = Field()
+     create_time = Field()
examples/gxb/run.py ADDED
@@ -0,0 +1,15 @@
+ import asyncio
+
+ from crawlo.crawler import CrawlerProcess
+ from examples.gxb.spider.telecom_device_licenses import TelecomDeviceLicensesSpider
+
+ async def main():
+     process = CrawlerProcess()
+     await process.crawl(
+         [TelecomDeviceLicensesSpider]
+     )
+
+
+ if __name__ == '__main__':
+     asyncio.run(main())
+     # 132023
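
Assuming crawlo 1.0.5 is installed and the bundled `examples` package is importable (it is listed in top_level.txt above), this entry point can be launched as a module, e.g.:

    python -m examples.gxb.run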