crawlo-1.0.4-py3-none-any.whl → crawlo-1.0.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (112)
  1. crawlo/__init__.py +25 -9
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +41 -0
  4. crawlo/commands/__init__.py +10 -0
  5. crawlo/commands/genspider.py +111 -0
  6. crawlo/commands/run.py +149 -0
  7. crawlo/commands/startproject.py +101 -0
  8. crawlo/core/__init__.py +2 -2
  9. crawlo/core/engine.py +158 -158
  10. crawlo/core/processor.py +40 -40
  11. crawlo/core/scheduler.py +57 -57
  12. crawlo/crawler.py +219 -242
  13. crawlo/downloader/__init__.py +78 -78
  14. crawlo/downloader/aiohttp_downloader.py +200 -259
  15. crawlo/downloader/cffi_downloader.py +277 -0
  16. crawlo/downloader/httpx_downloader.py +246 -187
  17. crawlo/event.py +11 -11
  18. crawlo/exceptions.py +78 -64
  19. crawlo/extension/__init__.py +31 -31
  20. crawlo/extension/log_interval.py +49 -49
  21. crawlo/extension/log_stats.py +44 -44
  22. crawlo/extension/logging_extension.py +35 -0
  23. crawlo/filters/__init__.py +37 -37
  24. crawlo/filters/aioredis_filter.py +150 -150
  25. crawlo/filters/memory_filter.py +202 -202
  26. crawlo/items/__init__.py +22 -62
  27. crawlo/items/base.py +31 -0
  28. crawlo/items/fields.py +54 -0
  29. crawlo/items/items.py +105 -119
  30. crawlo/middleware/__init__.py +21 -21
  31. crawlo/middleware/default_header.py +32 -32
  32. crawlo/middleware/download_delay.py +28 -28
  33. crawlo/middleware/middleware_manager.py +135 -140
  34. crawlo/middleware/proxy.py +246 -0
  35. crawlo/middleware/request_ignore.py +30 -30
  36. crawlo/middleware/response_code.py +18 -18
  37. crawlo/middleware/response_filter.py +26 -26
  38. crawlo/middleware/retry.py +90 -90
  39. crawlo/network/__init__.py +7 -7
  40. crawlo/network/request.py +203 -204
  41. crawlo/network/response.py +166 -166
  42. crawlo/pipelines/__init__.py +13 -13
  43. crawlo/pipelines/console_pipeline.py +39 -39
  44. crawlo/pipelines/mongo_pipeline.py +116 -116
  45. crawlo/pipelines/mysql_batch_pipline.py +273 -134
  46. crawlo/pipelines/mysql_pipeline.py +195 -195
  47. crawlo/pipelines/pipeline_manager.py +56 -56
  48. crawlo/settings/__init__.py +7 -7
  49. crawlo/settings/default_settings.py +169 -94
  50. crawlo/settings/setting_manager.py +99 -99
  51. crawlo/spider/__init__.py +41 -36
  52. crawlo/stats_collector.py +59 -59
  53. crawlo/subscriber.py +106 -106
  54. crawlo/task_manager.py +27 -27
  55. crawlo/templates/crawlo.cfg.tmpl +11 -0
  56. crawlo/templates/project/__init__.py.tmpl +4 -0
  57. crawlo/templates/project/items.py.tmpl +18 -0
  58. crawlo/templates/project/middlewares.py.tmpl +76 -0
  59. crawlo/templates/project/pipelines.py.tmpl +64 -0
  60. crawlo/templates/project/settings.py.tmpl +54 -0
  61. crawlo/templates/project/spiders/__init__.py.tmpl +6 -0
  62. crawlo/templates/spider/spider.py.tmpl +32 -0
  63. crawlo/utils/__init__.py +7 -7
  64. crawlo/utils/concurrency_manager.py +124 -124
  65. crawlo/utils/date_tools.py +233 -177
  66. crawlo/utils/db_helper.py +344 -0
  67. crawlo/utils/func_tools.py +82 -82
  68. crawlo/utils/log.py +129 -39
  69. crawlo/utils/pqueue.py +173 -173
  70. crawlo/utils/project.py +199 -59
  71. crawlo/utils/request.py +267 -122
  72. crawlo/utils/spider_loader.py +63 -0
  73. crawlo/utils/system.py +11 -11
  74. crawlo/utils/tools.py +5 -303
  75. crawlo/utils/url.py +39 -39
  76. {crawlo-1.0.4.dist-info → crawlo-1.0.6.dist-info}/METADATA +49 -48
  77. crawlo-1.0.6.dist-info/RECORD +94 -0
  78. crawlo-1.0.6.dist-info/entry_points.txt +2 -0
  79. {crawlo-1.0.4.dist-info → crawlo-1.0.6.dist-info}/top_level.txt +1 -0
  80. examples/gxb/items.py +36 -0
  81. examples/gxb/run.py +16 -0
  82. examples/gxb/settings.py +72 -0
  83. examples/gxb/spider/__init__.py +0 -0
  84. examples/gxb/spider/miit_spider.py +180 -0
  85. examples/gxb/spider/telecom_device.py +129 -0
  86. tests/__init__.py +7 -7
  87. tests/test_proxy_health_check.py +33 -0
  88. tests/test_proxy_middleware_integration.py +137 -0
  89. tests/test_proxy_providers.py +57 -0
  90. tests/test_proxy_stats.py +20 -0
  91. tests/test_proxy_strategies.py +60 -0
  92. crawlo/downloader/playwright_downloader.py +0 -161
  93. crawlo/templates/item_template.tmpl +0 -22
  94. crawlo/templates/project_template/main.py +0 -33
  95. crawlo/templates/project_template/setting.py +0 -190
  96. crawlo/templates/spider_template.tmpl +0 -31
  97. crawlo-1.0.4.dist-info/RECORD +0 -79
  98. crawlo-1.0.4.dist-info/entry_points.txt +0 -2
  99. tests/baidu_spider/__init__.py +0 -7
  100. tests/baidu_spider/demo.py +0 -94
  101. tests/baidu_spider/items.py +0 -25
  102. tests/baidu_spider/middleware.py +0 -49
  103. tests/baidu_spider/pipeline.py +0 -55
  104. tests/baidu_spider/request_fingerprints.txt +0 -9
  105. tests/baidu_spider/run.py +0 -27
  106. tests/baidu_spider/settings.py +0 -80
  107. tests/baidu_spider/spiders/__init__.py +0 -7
  108. tests/baidu_spider/spiders/bai_du.py +0 -61
  109. tests/baidu_spider/spiders/sina.py +0 -79
  110. {crawlo-1.0.4.dist-info → crawlo-1.0.6.dist-info}/WHEEL +0 -0
  111. {crawlo/templates/project_template/items → examples}/__init__.py +0 -0
  112. {crawlo/templates/project_template/spiders → examples/gxb}/__init__.py +0 -0
crawlo/utils/tools.py CHANGED
@@ -1,303 +1,5 @@
- import json
- import re
- from pprint import pformat
- from datetime import date, time, datetime
-
- from crawlo.utils.log import get_logger
-
-
- logger = get_logger(__name__)
-
-
- def make_insert_sql(
-     table, data, auto_update=False, update_columns=(), insert_ignore=False
- ):
-     """
-     @summary: for MySQL
-     ---------
-     @param table:
-     @param data: row data as a dict (JSON-style)
-     @param auto_update: uses REPLACE INTO, fully overwriting existing rows
-     @param update_columns: columns to update (defaults to all); when set, auto_update is ignored and only these columns are updated on a duplicate key conflict
-     @param insert_ignore: skip rows that already exist
-     ---------
-     @result:
-     """
-
-     keys = ["`{}`".format(key) for key in data.keys()]
-     keys = list2str(keys).replace("'", "")
-
-     values = [format_sql_value(value) for value in data.values()]
-     values = list2str(values)
-
-     if update_columns:
-         if not isinstance(update_columns, (tuple, list)):
-             update_columns = [update_columns]
-         update_columns_ = ", ".join(
-             ["{key}=values({key})".format(key=key) for key in update_columns]
-         )
-         sql = (
-             "insert%s into `{table}` {keys} values {values} on duplicate key update %s"
-             % (" ignore" if insert_ignore else "", update_columns_)
-         )
-
-     elif auto_update:
-         sql = "replace into `{table}` {keys} values {values}"
-     else:
-         sql = "insert%s into `{table}` {keys} values {values}" % (
-             " ignore" if insert_ignore else ""
-         )
-
-     sql = sql.format(table=table, keys=keys, values=values).replace("None", "null")
-     return sql
-
-
- def make_update_sql(table, data, condition):
-     """
-     @summary: for MySQL; Oracle date columns would need to_date handling (TODO)
-     ---------
-     @param table:
-     @param data: row data as a dict (JSON-style)
-     @param condition: WHERE condition
-     ---------
-     @result:
-     """
-     key_values = []
-
-     for key, value in data.items():
-         value = format_sql_value(value)
-         if isinstance(value, str):
-             key_values.append("`{}`={}".format(key, repr(value)))
-         elif value is None:
-             key_values.append("`{}`={}".format(key, "null"))
-         else:
-             key_values.append("`{}`={}".format(key, value))
-
-     key_values = ", ".join(key_values)
-
-     sql = "update `{table}` set {key_values} where {condition}"
-     sql = sql.format(table=table, key_values=key_values, condition=condition)
-     return sql
-
-
- def make_batch_sql(
-     table, datas, auto_update=False, update_columns=(), update_columns_value=()
- ):
-     """
-     @summary: build a batched SQL statement
-     ---------
-     @param table:
-     @param datas: row data as [{...}]
-     @param auto_update: uses REPLACE INTO, fully overwriting existing rows
-     @param update_columns: columns to update (defaults to all); when set, auto_update is ignored and only these columns are updated on a duplicate key conflict
-     @param update_columns_value: values for the updated columns; defaults to the corresponding values in datas. String values must carry their own quotes, e.g. update_columns_value=("'test'",)
-     ---------
-     @result:
-     """
-     if not datas:
-         return
-
-     keys = list(set([key for data in datas for key in data]))
-     # values_placeholder = ["%s"] * len(keys)
-     values = []
-     for data in datas:
-         # Check that data is a dict
-         if not isinstance(data, dict):
-             # If data is not a dict, log an error with its content and type
-             # logger.error(f"Expected a dict but got: {data} (type: {type(data)})")
-             continue  # skip non-dict data and move on to the next record
-
-         value = []
-         for key in keys:
-             # Get the value for the current key from the dict
-             current_data = data.get(key)
-             try:
-                 # Normalize the value
-                 current_data = format_sql_value(current_data)
-                 value.append(current_data)  # append the normalized value
-             except Exception as e:
-                 # If normalization fails, log the error
-                 logger.error(f"{key}: {current_data} (type: {type(current_data)}) -> {e}")
-
-         # Append the processed row to values
-         values.append(value)
-     keys_str = ", ".join(["`{}`".format(key) for key in keys])
-     placeholders_str = ", ".join(["%s"] * len(keys))
-
-     if update_columns:
-         if not isinstance(update_columns, (tuple, list)):
-             update_columns = [update_columns]
-         if update_columns_value:
-             update_columns_ = ", ".join(
-                 [
-                     "`{key}`={value}".format(key=key, value=value)
-                     for key, value in zip(update_columns, update_columns_value)
-                 ]
-             )
-         else:
-             # Use the VALUES() function here to reference the inserted values
-             update_columns_ = ", ".join(
-                 ["`{key}`=VALUES(`{key}`)".format(key=key) for key in update_columns]
-             )
-
-         sql = f"INSERT INTO `{table}` ({keys_str}) VALUES ({placeholders_str}) ON DUPLICATE KEY UPDATE {update_columns_}"
-     elif auto_update:
-         sql = "REPLACE INTO `{table}` ({keys}) VALUES ({values_placeholder})".format(
-             table=table, keys=keys_str, values_placeholder=placeholders_str
-         )
-     else:
-         sql = "INSERT IGNORE INTO `{table}` ({keys}) VALUES ({values_placeholder})".format(
-             table=table, keys=keys_str, values_placeholder=placeholders_str
-         )
-     return sql, values
-
-
- def format_sql_value(value):
-     """
-     Normalize a value for use in SQL
-     """
-     if value is None:
-         return None  # handle NULL values
-
-     # Handle strings
-     if isinstance(value, str):
-         return value.strip()  # strip leading/trailing whitespace
-
-     # Handle lists and tuples
-     elif isinstance(value, (list, tuple)):
-         try:
-             return dumps_json(value)  # serialize to a JSON string
-         except Exception as e:
-             raise ValueError(f"Failed to serialize list/tuple to JSON: {value}, error: {e}")
-
-     # Handle dicts
-     elif isinstance(value, dict):
-         try:
-             return dumps_json(value)  # serialize to a JSON string
-         except Exception as e:
-             raise ValueError(f"Failed to serialize dict to JSON: {value}, error: {e}")
-
-     # Handle booleans
-     elif isinstance(value, bool):
-         return int(value)  # convert to int
-
-     # Match numeric types before the remaining cases
-     elif isinstance(value, (int, float)):
-         return value  # return numbers as-is
-
-     # Handle date/time types
-     elif isinstance(value, (date, time, datetime)):
-         return str(value)  # convert to a string representation
-
-     # Raise for any type that cannot be handled
-     else:
-         raise TypeError(f"Unsupported value type: {type(value)}, value: {value}")
-
-
-
-
- def list2str(datas):
-     """
-     Convert a list to a string
-     :param datas: [1, 2]
-     :return: (1, 2)
-     """
-     data_str = str(tuple(datas))
-     data_str = re.sub(r",\)$", ")", data_str)
-     return data_str
-
- _REGEXPS = {}
-
- def get_info(html, regexps, allow_repeat=True, fetch_one=False, split=None):
-     regexps = isinstance(regexps, str) and [regexps] or regexps
-
-     infos = []
-     for regex in regexps:
-         if regex == "":
-             continue
-
-         if regex not in _REGEXPS.keys():
-             _REGEXPS[regex] = re.compile(regex, re.S)
-
-         if fetch_one:
-             infos = _REGEXPS[regex].search(html)
-             if infos:
-                 infos = infos.groups()
-             else:
-                 continue
-         else:
-             infos = _REGEXPS[regex].findall(str(html))
-
-         if len(infos) > 0:
-             break
-
-     if fetch_one:
-         infos = infos if infos else ("",)
-         return infos if len(infos) > 1 else infos[0]
-     else:
-         infos = allow_repeat and infos or sorted(set(infos), key=infos.index)
-         infos = split.join(infos) if split else infos
-         return infos
-
-
- def get_json(json_str):
-     """
-     @summary: parse a JSON object
-     ---------
-     @param json_str: a JSON-formatted string
-     ---------
-     @result: the parsed JSON object
-     """
-
-     try:
-         return json.loads(json_str) if json_str else {}
-     except Exception as e1:
-         try:
-             json_str = json_str.strip()
-             json_str = json_str.replace("'", '"')
-             keys = get_info(json_str, r"(\w+):")
-             for key in keys:
-                 json_str = json_str.replace(key, '"%s"' % key)
-
-             return json.loads(json_str) if json_str else {}
-
-         except Exception as e2:
-             logger.error(
-                 """
-                 e1: %s
-                 format json_str: %s
-                 e2: %s
-                 """
-                 % (e1, json_str, e2)
-             )
-
-         return {}
-
-
- def dumps_json(data, indent=4, sort_keys=False):
-     """
-     @summary: pretty-format JSON for printing
-     ---------
-     @param data: a JSON-formatted string or JSON object
-     @param indent:
-     @param sort_keys:
-     ---------
-     @result: the formatted string
-     """
-     try:
-         if isinstance(data, str):
-             data = get_json(data)
-
-         data = json.dumps(
-             data,
-             ensure_ascii=False,
-             indent=indent,
-             skipkeys=True,
-             sort_keys=sort_keys,
-             default=str,
-         )
-
-     except Exception as e:
-         data = pformat(data)
-
-     return data
+ def custom_extractor_proxy(data: dict, key: str='proxy') -> dict | str | None:
+     """Only responsible for extracting the proxy part from the API response data"""
+     if data.get("status") == 0:
+         return data.get(key)  # return the whole {"http": "...", "https": "..."} dict
+     return None
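In 1.0.6, tools.py shrinks to this single proxy-extraction helper; the SQL helpers removed above appear to have moved into the new crawlo/utils/db_helper.py (+344 lines in this release). A minimal sketch of how the remaining helper behaves, using a made-up provider payload (every field name other than "status" and the default "proxy" key is an assumption):

    # Hypothetical provider response; custom_extractor_proxy treats status == 0 as success.
    api_data = {
        "status": 0,
        "proxy": {"http": "http://10.0.0.1:8000", "https": "http://10.0.0.1:8000"},  # made-up values
    }

    custom_extractor_proxy(api_data)            # -> the whole {"http": ..., "https": ...} dict
    custom_extractor_proxy({"status": 1})       # -> None (non-zero status, nothing extracted)
    custom_extractor_proxy(api_data, key="ip")  # -> None here; key selects a different field name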
crawlo/utils/url.py CHANGED
@@ -1,40 +1,40 @@
- from urllib.parse import urldefrag
- from w3lib.url import add_or_replace_parameter
-
-
- def escape_ajax(url: str) -> str:
-     """
-     Transform a URL according to Google's AJAX crawling scheme (handling #! hash fragments):
-     https://developers.google.com/webmasters/ajax-crawling/docs/getting-started
-
-     Rules:
-     1. Only URLs containing `#!` are transformed (this marks an AJAX-crawlable page)
-     2. `#!key=value` is rewritten to `?_escaped_fragment_=key%3Dvalue`
-     3. Existing query parameters (if any) are preserved
-
-     Examples:
-     >>> escape_ajax("www.example.com/ajax.html#!key=value")
-     'www.example.com/ajax.html?_escaped_fragment_=key%3Dvalue'
-     >>> escape_ajax("www.example.com/ajax.html?k1=v1#!key=value")
-     'www.example.com/ajax.html?k1=v1&_escaped_fragment_=key%3Dvalue'
-     >>> escape_ajax("www.example.com/ajax.html#!")
-     'www.example.com/ajax.html?_escaped_fragment_='
-
-     URLs that are not AJAX-crawlable (no #!) are returned unchanged:
-     >>> escape_ajax("www.example.com/ajax.html#normal")
-     'www.example.com/ajax.html#normal'
-     """
-     # Split the URL into its base part and hash fragment
-     de_frag, frag = urldefrag(url)
-
-     # Only handle hash fragments that start with "!" (per the Google scheme)
-     if not frag.startswith("!"):
-         return url  # return unchanged if the rule does not apply
-
-     # Delegate to the helper that adds the `_escaped_fragment_` parameter
-     return add_or_replace_parameter(de_frag, "_escaped_fragment_", frag[1:])
-
-
- if __name__ == '__main__':
-     f = escape_ajax('http://example.com/page#!')
+ from urllib.parse import urldefrag
+ from w3lib.url import add_or_replace_parameter
+
+
+ def escape_ajax(url: str) -> str:
+     """
+     Transform a URL according to Google's AJAX crawling scheme (handling #! hash fragments):
+     https://developers.google.com/webmasters/ajax-crawling/docs/getting-started
+
+     Rules:
+     1. Only URLs containing `#!` are transformed (this marks an AJAX-crawlable page)
+     2. `#!key=value` is rewritten to `?_escaped_fragment_=key%3Dvalue`
+     3. Existing query parameters (if any) are preserved
+
+     Examples:
+     >>> escape_ajax("www.example.com/ajax.html#!key=value")
+     'www.example.com/ajax.html?_escaped_fragment_=key%3Dvalue'
+     >>> escape_ajax("www.example.com/ajax.html?k1=v1#!key=value")
+     'www.example.com/ajax.html?k1=v1&_escaped_fragment_=key%3Dvalue'
+     >>> escape_ajax("www.example.com/ajax.html#!")
+     'www.example.com/ajax.html?_escaped_fragment_='
+
+     URLs that are not AJAX-crawlable (no #!) are returned unchanged:
+     >>> escape_ajax("www.example.com/ajax.html#normal")
+     'www.example.com/ajax.html#normal'
+     """
+     # Split the URL into its base part and hash fragment
+     de_frag, frag = urldefrag(url)
+
+     # Only handle hash fragments that start with "!" (per the Google scheme)
+     if not frag.startswith("!"):
+         return url  # return unchanged if the rule does not apply
+
+     # Delegate to the helper that adds the `_escaped_fragment_` parameter
+     return add_or_replace_parameter(de_frag, "_escaped_fragment_", frag[1:])
+
+
+ if __name__ == '__main__':
+     f = escape_ajax('http://example.com/page#!')
      print(f)
{crawlo-1.0.4.dist-info → crawlo-1.0.6.dist-info}/METADATA CHANGED
@@ -1,48 +1,49 @@
- Metadata-Version: 2.4
- Name: crawlo
- Version: 1.0.4
- Summary: Crawlo is a high-performance Python crawler framework built on asynchronous IO, with support for distributed crawling.
- Home-page: https://github.com/crawl-coder/Crawlo.git
- Author: crawl-coder
- Author-email: crawlo@qq.com
- License: MIT
- Classifier: Programming Language :: Python :: 3
- Classifier: License :: OSI Approved :: MIT License
- Classifier: Operating System :: OS Independent
- Requires-Python: >=3.6
- Description-Content-Type: text/markdown
- Requires-Dist: aiohttp>=3.12.14
- Requires-Dist: aiomysql>=0.2.0
- Requires-Dist: aioredis>=2.0.1
- Requires-Dist: asyncmy>=0.2.10
- Requires-Dist: cssselect>=1.2.0
- Requires-Dist: dateparser>=1.2.2
- Requires-Dist: httpx[http2]>=0.27.0
- Requires-Dist: lxml>=5.2.1
- Requires-Dist: motor>=3.7.0
- Requires-Dist: parsel>=1.9.1
- Requires-Dist: pydantic>=2.11.7
- Requires-Dist: pymongo>=4.11
- Requires-Dist: PyMySQL>=1.1.1
- Requires-Dist: python-dateutil>=2.9.0.post0
- Requires-Dist: redis>=6.2.0
- Requires-Dist: requests>=2.32.4
- Requires-Dist: six>=1.17.0
- Requires-Dist: ujson>=5.9.0
- Requires-Dist: urllib3>=2.5.0
- Requires-Dist: w3lib>=2.1.2
- Provides-Extra: render
- Requires-Dist: webdriver-manager>=4.0.0; extra == "render"
- Requires-Dist: playwright; extra == "render"
- Requires-Dist: selenium>=3.141.0; extra == "render"
- Provides-Extra: all
- Requires-Dist: bitarray>=1.5.3; extra == "all"
- Requires-Dist: PyExecJS>=1.5.1; extra == "all"
- Requires-Dist: pymongo>=3.10.1; extra == "all"
- Requires-Dist: redis-py-cluster>=2.1.0; extra == "all"
- Requires-Dist: webdriver-manager>=4.0.0; extra == "all"
- Requires-Dist: playwright; extra == "all"
- Requires-Dist: selenium>=3.141.0; extra == "all"
-
- # Crawlo
- Crawlo is a high-performance Python crawler framework built on asynchronous IO, supporting distributed crawling and data pipelines.
+ Metadata-Version: 2.4
+ Name: crawlo
+ Version: 1.0.6
+ Summary: Crawlo is a high-performance Python crawler framework built on asynchronous IO, with support for distributed crawling.
+ Home-page: https://github.com/crawl-coder/Crawlo.git
+ Author: crawl-coder
+ Author-email: crawlo@qq.com
+ License: MIT
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.6
+ Description-Content-Type: text/markdown
+ Requires-Dist: aiohttp>=3.12.14
+ Requires-Dist: aiomysql>=0.2.0
+ Requires-Dist: aioredis>=2.0.1
+ Requires-Dist: asyncmy>=0.2.10
+ Requires-Dist: cssselect>=1.2.0
+ Requires-Dist: dateparser>=1.2.2
+ Requires-Dist: httpx[http2]>=0.27.0
+ Requires-Dist: curl-cffi>=0.13.0
+ Requires-Dist: lxml>=5.2.1
+ Requires-Dist: motor>=3.7.0
+ Requires-Dist: parsel>=1.9.1
+ Requires-Dist: pydantic>=2.11.7
+ Requires-Dist: pymongo>=4.11
+ Requires-Dist: PyMySQL>=1.1.1
+ Requires-Dist: python-dateutil>=2.9.0.post0
+ Requires-Dist: redis>=6.2.0
+ Requires-Dist: requests>=2.32.4
+ Requires-Dist: six>=1.17.0
+ Requires-Dist: ujson>=5.9.0
+ Requires-Dist: urllib3>=2.5.0
+ Requires-Dist: w3lib>=2.1.2
+ Provides-Extra: render
+ Requires-Dist: webdriver-manager>=4.0.0; extra == "render"
+ Requires-Dist: playwright; extra == "render"
+ Requires-Dist: selenium>=3.141.0; extra == "render"
+ Provides-Extra: all
+ Requires-Dist: bitarray>=1.5.3; extra == "all"
+ Requires-Dist: PyExecJS>=1.5.1; extra == "all"
+ Requires-Dist: pymongo>=3.10.1; extra == "all"
+ Requires-Dist: redis-py-cluster>=2.1.0; extra == "all"
+ Requires-Dist: webdriver-manager>=4.0.0; extra == "all"
+ Requires-Dist: playwright; extra == "all"
+ Requires-Dist: selenium>=3.141.0; extra == "all"
+
+ # Crawlo
+ Crawlo is a high-performance Python crawler framework built on asynchronous IO, supporting distributed crawling and data pipelines.
crawlo-1.0.6.dist-info/RECORD ADDED
@@ -0,0 +1,94 @@
+ crawlo/__init__.py,sha256=xpiIAZbSG3CzneJuDLPCbwfRcvw2wyHYl2kJjaNfNGY,584
+ crawlo/__version__.py,sha256=1HqFYnow__4MUVRI_OMjvzTBzKkReNozOdA96kH53cA,23
+ crawlo/cli.py,sha256=hjAJKx9pba375sATvvcy-dtZyBIgXj8fRBq9RFIZHA4,1206
+ crawlo/crawler.py,sha256=nfuA_f8QnuIp2F4ZbaJv8Fceo_QPwqV1jYdD_edkMjg,8527
+ crawlo/event.py,sha256=ZhoPW5CglCEuZNFEwviSCBIw0pT5O6jT98bqYrDFd3E,324
+ crawlo/exceptions.py,sha256=xdyZkvVcLEJ-19sWMHvn9IJsu30-hAY2jJhA2kYIims,1207
+ crawlo/stats_collector.py,sha256=v4jC9BAe-23w93hWzbeMCCgQ9VuFPyxw5JV9ItbGH8w,1636
+ crawlo/subscriber.py,sha256=udlHeTR0ymGQhCDxVUGwUzeeeR4TYCEJrJwFnkgr0cU,3836
+ crawlo/task_manager.py,sha256=D9m-nqnGj-FZPtGk4CdwZX3Gw7IWyYvTS7CHpRGWc_w,748
+ crawlo/commands/__init__.py,sha256=dRu3ipuhDM7M1eTb6zJtQZ_u7N_tZumGfH5_I92xno8,252
+ crawlo/commands/genspider.py,sha256=kSHYsAGHRoxU6Qf_MGpR_VS-Ua5NUGY2KGm_Wapn0sw,3529
+ crawlo/commands/run.py,sha256=Upv8K4sM0c0I1fIwTFK18VDcSHF7xabqfXtQ82fk56g,4628
+ crawlo/commands/startproject.py,sha256=1KOq_CALy01oklr0dAUYhGFzu4f7w45q2H0O3qafLX4,3494
+ crawlo/core/__init__.py,sha256=JYSAn15r8yWgRK_Nc69t_8tZCyb70MiPZKssA8wrYz0,43
+ crawlo/core/engine.py,sha256=JFHooPp-5cfHSyxEh87nOOR5NMaPLVDfNSqAsbtx4PM,6030
+ crawlo/core/processor.py,sha256=oHLs-cno0bJGTNc9NGD2S7_2-grI3ruvggO0SY2mf3Q,1180
+ crawlo/core/scheduler.py,sha256=ZMPs4LSs69FsFfDTvaOMJKqpSQQGvIEE9pMyYVVAA64,1948
+ crawlo/downloader/__init__.py,sha256=72u2Hef4HaMfs9VCqEjbMtiaRXbaXmgNiJn6qy09LHs,2384
+ crawlo/downloader/aiohttp_downloader.py,sha256=YfvYCDp3y0OsVyfdYX1XJC3EcCrbNLKOcFY8b7JC3_w,7675
+ crawlo/downloader/cffi_downloader.py,sha256=QthBmZOE0cjYNRTM-449EuaFuqxxdc19kp93iqOlwB8,12678
+ crawlo/downloader/httpx_downloader.py,sha256=yshb1JZa4B6hcVwIT97SrxCXkj3HJqT9IDpxSxjkJm4,11754
+ crawlo/extension/__init__.py,sha256=O2BVK1U3WwmurZb-PaYVz3g1tZ_iYUjCwilmUKf6844,1170
+ crawlo/extension/log_interval.py,sha256=FOWeTOuWtOpCz2UPV5F_--QIa8yomltSpjxbw3F7bkU,1971
+ crawlo/extension/log_stats.py,sha256=JFJsdK7UWkhP4TEAF-H-S7SpQbDpBryS0AT6e6jZCBo,1721
+ crawlo/extension/logging_extension.py,sha256=rty2_up53KV05nCazuBuz2ZapHKq0ti7mGVBzMTr0ak,1236
+ crawlo/filters/__init__.py,sha256=9fJQRVkxWWPChajYbAGe1O6UYB639xWt0hiLUGBs4hQ,1014
+ crawlo/filters/aioredis_filter.py,sha256=phBFW9Z28oylbik9Kb2WHM65Wo5yRAH2w9Yz0_2HaOQ,5621
+ crawlo/filters/memory_filter.py,sha256=L8XEJkObOxs4BzYpQvk9PVM969k2LE61VFsnEOTEf_E,6841
+ crawlo/items/__init__.py,sha256=HLDShSwAQUrgwt9_Ec2SIwzpIDZnNOCg9nSYqqEQdp8,407
+ crawlo/items/base.py,sha256=DZG0qENdukJExRtKjqdNkSlzUoWR3ucjyF73LYLANFo,754
+ crawlo/items/fields.py,sha256=fpS0vlRPpZYjTaMDgI9Q8z_YQqruwf6fi4Dgm6R2oEk,1854
+ crawlo/items/items.py,sha256=OmVEvMmgofMU95GkaiWkfNQ2fjsH2fY9sw3SKcmUhLs,3478
+ crawlo/middleware/__init__.py,sha256=PSwpRLdBUopaQzBp1S0zK_TZbrRagQ4yzvgyLy4tBk8,570
+ crawlo/middleware/default_header.py,sha256=OVW4vpRPp3Y6qYXtiEYlGqVjCYcbuv1Iecc7zEgwCsI,1099
+ crawlo/middleware/download_delay.py,sha256=P2eyAJXwdLdC4yYuLhvKZVa1b5YQvQD0GpsR8aDW8-8,994
+ crawlo/middleware/middleware_manager.py,sha256=Vfkasi8YaLxzGrOrFYfxOMEGRS8XocqeQMruLtVxL_c,6360
+ crawlo/middleware/proxy.py,sha256=PiIfhRXfcMzBtW_p7jfR8rGxcM4VT68Mk54swbaV2H4,9801
+ crawlo/middleware/request_ignore.py,sha256=jdybWFVXuA5YsAPfZJFzLTWkYhEAewNgxuhFqczPW9M,1027
+ crawlo/middleware/response_code.py,sha256=vgXWv3mMu_v9URvhKA9myIFH4u6L4EwNme80wL4DCGc,677
+ crawlo/middleware/response_filter.py,sha256=O2gkV_Yjart8kmmXTGzrtZnb_Uuefap4uL2Cu01iRs4,863
+ crawlo/middleware/retry.py,sha256=a2EmigYFzL8oxd50JhrSe5XbYJyx8yDjOjE5fXAOFhY,3459
+ crawlo/network/__init__.py,sha256=DVz1JpasjxCgOlXvm76gz-S18OXr4emG_J39yi5iVuA,130
+ crawlo/network/request.py,sha256=qd50mmrXS6yZKmAb6ERAMHzm2Ln80Wu5NSMwx_t1IGc,7247
+ crawlo/network/response.py,sha256=z2Owti_9ds567jLvfuX8hrfdQL8JKn5lkt2QOc-Gi3Y,6200
+ crawlo/pipelines/__init__.py,sha256=IbXJ6B8LqxVVjeLNgL_12AxV6zbV8hNRQxAfMLjjSaw,273
+ crawlo/pipelines/console_pipeline.py,sha256=bwe5hZgaVSWmh3R8XpOaaeAjJme-Ttrpo6G6f1cnLIg,1287
+ crawlo/pipelines/mongo_pipeline.py,sha256=Yr48D0T61-_Y-EpgWXf7BUn9w8e-Pj5P07QDSPZ0pYU,4558
+ crawlo/pipelines/mysql_batch_pipline.py,sha256=Mj3PReDRw22JhJ5hZxnka4cirKq3kEbOCNhgpq1gvfA,10611
+ crawlo/pipelines/mysql_pipeline.py,sha256=bsAFqpxrCijzvX-IusxOtvTvQEUCt5uHNTyYMo_pIq4,8056
+ crawlo/pipelines/pipeline_manager.py,sha256=k-Rg0os0Havrov99D-Jn3ROpnz154K30tf7aARE5W3k,2174
+ crawlo/settings/__init__.py,sha256=NgYFLfk_Bw7h6KSoepJn_lMBSqVbCHebjKxaE3_eMgw,130
+ crawlo/settings/default_settings.py,sha256=urj4XJ--ZpVRbbo3fWUT71bYQLmElx43AC9KeHtqHBs,7310
+ crawlo/settings/setting_manager.py,sha256=4xXOzKwZCgAp8ybwvVcs2R--CsOD7c6dBIkj6DJHB3c,2998
+ crawlo/spider/__init__.py,sha256=lWi0bCR7HLT5bnj7_e9UIgFJjuqoeWtbwADfNkaajug,1139
+ crawlo/templates/crawlo.cfg.tmpl,sha256=lwiUVe5sFixJgHFEjn1OtbAeyWsECOrz37uheuVtulk,240
+ crawlo/templates/project/__init__.py.tmpl,sha256=aQnHaOjMSkTviOC8COUX0fKymuyf8lx2tGduxkMkXEE,61
+ crawlo/templates/project/items.py.tmpl,sha256=bXx-oCldMr2EgBKUAH9LH5gMnbyLiWX-EySAaMzcu2g,318
+ crawlo/templates/project/middlewares.py.tmpl,sha256=VAolmMTC6HBmJT5XvWB0ag6ig9iaDBS32adIQ1zPdw0,2177
+ crawlo/templates/project/pipelines.py.tmpl,sha256=xK1Yl7wYxiyUCm07GZvMnCS_cxJ5LF7z1YBBdkLlWys,1880
+ crawlo/templates/project/settings.py.tmpl,sha256=985Z-jiU6A31f5s1IVU4PvkC_QGlFlRRfTF6rZ_G4ek,1771
+ crawlo/templates/project/spiders/__init__.py.tmpl,sha256=zMbePipgLsctQUEnda4WkHz8rDLUX--rc8ruI6zkpWc,111
+ crawlo/templates/spider/spider.py.tmpl,sha256=SkNv1kOwet7ZdxoNXpj-o1iRETB30bcwPP16Uy8lyXg,869
+ crawlo/utils/__init__.py,sha256=XCYumI8wJ1jU_Myn_K0LT-LVygPDUCdETCbXM3EWvlo,130
+ crawlo/utils/concurrency_manager.py,sha256=o-_cfeUHdlBOM3eAXF857MtekSrRcVTBJ2jWZvY6weQ,5230
+ crawlo/utils/date_tools.py,sha256=lcEFP2Z5b-6pUTHczrzCCuqiHP_4_2zamomMGPZrExo,7194
+ crawlo/utils/db_helper.py,sha256=ZqOt1d3mErVv4TOvoWlov0niUxORB9aHByTmMoNFIDw,10902
+ crawlo/utils/func_tools.py,sha256=y-TYP9H3X67MS_foWy9Z2LIS6GP7Y4Cy3T168ulq3Jc,2451
+ crawlo/utils/log.py,sha256=YD2FfXuuE2MC9ZdQQZ0H7KysE7l_LHZqQepaTPlcApo,4133
+ crawlo/utils/pqueue.py,sha256=HDgX4HAkc7RqYUtX6q51tzI1ZRTACf8P_4jLqC4-uC0,5559
+ crawlo/utils/project.py,sha256=qAiCmpIxiB7RxCLG-U5lGV6k4UCa21uRdykTfnAF834,7669
+ crawlo/utils/request.py,sha256=ejdKpTwc-HE04HQybafhOVywzz57IV3pY0YMkSLyGUo,9065
+ crawlo/utils/spider_loader.py,sha256=V0CBTicJBYBZafhwLfDEfuEc_hJ2mSoiptT6qKufI9U,2249
+ crawlo/utils/system.py,sha256=24zGmtHNhDFMGVo7ftMV-Pqg6_5d63zsyNey9udvJJk,248
+ crawlo/utils/tools.py,sha256=uy7qw5Z1BIhyEgiHENvtM7WoGCJxlS8EX3PmOA7ouCo,275
+ crawlo/utils/url.py,sha256=RKe_iqdjafsNcp-P2GVLYpsL1qbxiuZLiFc-SqOQkcs,1521
+ examples/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ examples/gxb/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ examples/gxb/items.py,sha256=3-1Lxpi7EqMzheDJoO0MPyHky5nHG_nqQGgKlm8y6mQ,989
+ examples/gxb/run.py,sha256=9kJlR8f-tZ3BqP5PW7sCLTw6PAFWo3x4cG5lc-6GWqI,333
+ examples/gxb/settings.py,sha256=_nbXj9HV2e0F6liUzK0ueygLcaMM_IUlkuwL6mJqUfc,2345
+ examples/gxb/spider/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ examples/gxb/spider/miit_spider.py,sha256=tcQnuyUHfu-Re1QbKKSI9DXW3Sp1vyBW8qBzKLf_RC4,6666
+ examples/gxb/spider/telecom_device.py,sha256=58iG6BQtQjjDHOF7-DXH0u5_XnppP5AJTQwaVJVyBEo,4929
+ tests/__init__.py,sha256=409aRX8hsPffiZCVjOogtxwhACzBp8G2UTJyUQSxhK0,136
+ tests/test_proxy_health_check.py,sha256=_tDlxa_6TdL3M5RLkHF82roXJ8WIuG5hELBp2GADyKQ,1123
+ tests/test_proxy_middleware_integration.py,sha256=mTPK_XvbmLCV_QoVZzA3ybWOOX61493Ew78WfTp-bYQ,4441
+ tests/test_proxy_providers.py,sha256=u_R2fhab90vqvQEaOAztpAOe9tJXvUMIdoDxmStmXJ4,1749
+ tests/test_proxy_stats.py,sha256=ES00CEoDITYPFBGPk8pecFzD3ItYIv6NSpcqNd8-kvo,526
+ tests/test_proxy_strategies.py,sha256=9Z1pXmTNyw-eIhGXlf2abZbJx6igLohYq-_3hldQ5uE,1868
+ crawlo-1.0.6.dist-info/METADATA,sha256=_TDAivxDg2R8omq5gG1kUiODY2tZ3UEp5aH0SwshOjI,1825
+ crawlo-1.0.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ crawlo-1.0.6.dist-info/entry_points.txt,sha256=5HoVoTSPxI8SCa5B7pQYxLSrkOdiunyO9tqNsLMv52g,43
+ crawlo-1.0.6.dist-info/top_level.txt,sha256=keG_67pbZ_wZL2dmDRA9RMaNHTaV_x_oxZ9DKNgwvR0,22
+ crawlo-1.0.6.dist-info/RECORD,,
crawlo-1.0.6.dist-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
+ [console_scripts]
+ crawlo = crawlo.cli:main
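This console_scripts entry is what puts a `crawlo` command on PATH after installation: pip generates a small launcher that imports and calls crawlo.cli:main. A rough equivalent of that generated launcher (a sketch only; the diff does not show which arguments main() accepts):

    # Approximate shape of the pip-generated `crawlo` launcher for the entry point above.
    import sys
    from crawlo.cli import main

    if __name__ == "__main__":
        sys.exit(main())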
{crawlo-1.0.4.dist-info → crawlo-1.0.6.dist-info}/top_level.txt CHANGED
@@ -1,2 +1,3 @@
  crawlo
+ examples
  tests
examples/gxb/items.py ADDED
@@ -0,0 +1,36 @@
+ from crawlo.items import Item, Field
+
+ class RadioApprovalItem(Item):
+     approval_number = Field()
+     device_name = Field()
+     device_model = Field()
+     applicant = Field()
+     remarks = Field()
+     validity_period = Field()
+     frequency_tolerance = Field()
+     frequency_range = Field()
+     transmit_power = Field()
+     occupied_bandwidth = Field()
+     spurious_emission_limit = Field()
+     issue_date = Field()
+     approval_code = Field()
+     cmiit_id = Field()
+     modulation_mode = Field()
+     technology_system = Field()
+     mid = Field()
+
+ class TelecomLicenseItem(Item):
+     license_number = Field()
+     device_name = Field()
+     device_model = Field()
+     applicant = Field()
+     manufacturer = Field()
+     issue_date = Field()
+     expiry_date = Field()
+     certificate_type = Field()
+     remarks = Field()
+     certificate_status = Field()
+     origin = Field()
+     article_id = Field()
+     article_edit_date = Field()
+     create_time = Field()
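The example items declare their columns with Field() descriptors (crawlo/items/base.py and crawlo/items/fields.py are new in this release). Assuming crawlo's Item supports Scrapy-style dict access, which the field-based declaration suggests but this diff does not confirm, populating one would look roughly like this (all values invented):

    # Hedged sketch only: field names come from TelecomLicenseItem above, values are made up.
    item = TelecomLicenseItem()
    item["license_number"] = "XX-2024-000001"
    item["device_name"] = "Example LTE Module"
    item["certificate_status"] = "valid"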
examples/gxb/run.py ADDED
@@ -0,0 +1,16 @@
+ import asyncio
+
+ from crawlo.crawler import CrawlerProcess
+ from examples.gxb.spider.telecom_device import TelecomDeviceLicensesSpider
+
+ async def main():
+     process = CrawlerProcess()
+     await process.crawl(
+         [TelecomDeviceLicensesSpider]
+     )
+
+
+
+ if __name__ == '__main__':
+     asyncio.run(main())
+     # 132023