crawlo-1.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (59)
  1. crawlo/__init__.py +5 -0
  2. crawlo/__version__.py +2 -0
  3. crawlo/core/__init__.py +2 -0
  4. crawlo/core/engine.py +157 -0
  5. crawlo/core/processor.py +40 -0
  6. crawlo/core/scheduler.py +35 -0
  7. crawlo/crawler.py +107 -0
  8. crawlo/downloader/__init__.py +78 -0
  9. crawlo/downloader/aiohttp_downloader.py +96 -0
  10. crawlo/downloader/httpx_downloader.py +48 -0
  11. crawlo/event.py +11 -0
  12. crawlo/exceptions.py +64 -0
  13. crawlo/extension/__init__.py +31 -0
  14. crawlo/extension/log_interval.py +49 -0
  15. crawlo/extension/log_stats.py +44 -0
  16. crawlo/items/__init__.py +24 -0
  17. crawlo/items/items.py +88 -0
  18. crawlo/middleware/__init__.py +21 -0
  19. crawlo/middleware/default_header.py +32 -0
  20. crawlo/middleware/download_delay.py +28 -0
  21. crawlo/middleware/middleware_manager.py +140 -0
  22. crawlo/middleware/request_ignore.py +30 -0
  23. crawlo/middleware/response_code.py +19 -0
  24. crawlo/middleware/response_filter.py +26 -0
  25. crawlo/middleware/retry.py +84 -0
  26. crawlo/network/__init__.py +7 -0
  27. crawlo/network/request.py +52 -0
  28. crawlo/network/response.py +93 -0
  29. crawlo/pipelines/__init__.py +13 -0
  30. crawlo/pipelines/console_pipeline.py +20 -0
  31. crawlo/pipelines/mongo_pipeline.py +5 -0
  32. crawlo/pipelines/mysql_pipeline.py +5 -0
  33. crawlo/pipelines/pipeline_manager.py +56 -0
  34. crawlo/settings/__init__.py +7 -0
  35. crawlo/settings/default_settings.py +39 -0
  36. crawlo/settings/setting_manager.py +100 -0
  37. crawlo/spider/__init__.py +36 -0
  38. crawlo/stats_collector.py +47 -0
  39. crawlo/subscriber.py +27 -0
  40. crawlo/task_manager.py +27 -0
  41. crawlo/templates/item_template.tmpl +22 -0
  42. crawlo/templates/project_template/items/__init__.py +0 -0
  43. crawlo/templates/project_template/main.py +33 -0
  44. crawlo/templates/project_template/setting.py +190 -0
  45. crawlo/templates/project_template/spiders/__init__.py +0 -0
  46. crawlo/templates/spider_template.tmpl +31 -0
  47. crawlo/utils/__init__.py +7 -0
  48. crawlo/utils/date_tools.py +20 -0
  49. crawlo/utils/func_tools.py +22 -0
  50. crawlo/utils/log.py +39 -0
  51. crawlo/utils/pqueue.py +16 -0
  52. crawlo/utils/project.py +58 -0
  53. crawlo/utils/system.py +11 -0
  54. crawlo-1.0.0.dist-info/METADATA +36 -0
  55. crawlo-1.0.0.dist-info/RECORD +59 -0
  56. crawlo-1.0.0.dist-info/WHEEL +5 -0
  57. crawlo-1.0.0.dist-info/entry_points.txt +2 -0
  58. crawlo-1.0.0.dist-info/licenses/LICENSE +23 -0
  59. crawlo-1.0.0.dist-info/top_level.txt +1 -0
crawlo/templates/project_template/setting.py ADDED
@@ -0,0 +1,190 @@
+ # -*- coding: utf-8 -*-
+ """Crawler configuration file"""
+ # import os
+ # import sys
+ #
+ # # MYSQL
+ # MYSQL_IP = "localhost"
+ # MYSQL_PORT = 3306
+ # MYSQL_DB = ""
+ # MYSQL_USER_NAME = ""
+ # MYSQL_USER_PASS = ""
+ #
+ # # MONGODB
+ # MONGO_IP = "localhost"
+ # MONGO_PORT = 27017
+ # MONGO_DB = ""
+ # MONGO_USER_NAME = ""
+ # MONGO_USER_PASS = ""
+ # MONGO_URL = ""
+ #
+ # # REDIS
+ # # ip:port; multiple entries may be comma-separated or a list, e.g. ip1:port1,ip2:port2 or ["ip1:port1", "ip2:port2"]
+ # REDISDB_IP_PORTS = "localhost:6379"
+ # REDISDB_USER_PASS = ""
+ # REDISDB_DB = 0
+ # # Extra parameters passed when connecting to redis, e.g. ssl=True
+ # REDISDB_KWARGS = dict()
+ # # For redis sentinel mode
+ # REDISDB_SERVICE_NAME = ""
+ #
+ # # Pipelines for persisting items; customizable, defaults to MysqlPipeline
+ # ITEM_PIPELINES = [
+ #     "feapder.pipelines.mysql_pipeline.MysqlPipeline",
+ #     # "feapder.pipelines.mongo_pipeline.MongoPipeline",
+ #     # "feapder.pipelines.console_pipeline.ConsolePipeline",
+ # ]
+ # EXPORT_DATA_MAX_FAILED_TIMES = 10  # Max failures (saves and updates) when exporting data; an alert fires past this count
+ # EXPORT_DATA_MAX_RETRY_TIMES = 10  # Max retries (saves and updates) when exporting data; retrying stops past this count
+ #
+ # # Crawler-related
+ # # COLLECTOR
+ # COLLECTOR_TASK_COUNT = 32  # Number of tasks fetched per batch; 32 recommended for speed
+ #
+ # # SPIDER
+ # SPIDER_THREAD_COUNT = 1  # Crawler concurrency; 32 recommended for speed
+ # # Download interval in seconds. Supports randomization: SPIDER_SLEEP_TIME = [2, 5] sleeps a random 2-5 seconds, inclusive of 2 and 5
+ # SPIDER_SLEEP_TIME = 0
+ # SPIDER_MAX_RETRY_TIMES = 10  # Max retries per request
+ # KEEP_ALIVE = False  # Whether the crawler stays resident
+
+ # Download
+ # DOWNLOADER = "feapder.network.downloader.RequestsDownloader"  # Request downloader
+ # SESSION_DOWNLOADER = "feapder.network.downloader.RequestsSessionDownloader"
+ # RENDER_DOWNLOADER = "feapder.network.downloader.SeleniumDownloader"  # Render downloader
+ # # RENDER_DOWNLOADER="feapder.network.downloader.PlaywrightDownloader"
+ # MAKE_ABSOLUTE_LINKS = True  # Automatically convert links to absolute URLs
+
+ # # Browser rendering
+ # WEBDRIVER = dict(
+ #     pool_size=1,  # Number of browsers
+ #     load_images=True,  # Whether to load images
+ #     user_agent=None,  # String, or zero-argument function returning a user_agent
+ #     proxy=None,  # xxx.xxx.xxx.xxx:xxxx, or zero-argument function returning a proxy address
+ #     headless=False,  # Whether to run headless
+ #     driver_type="CHROME",  # CHROME, EDGE, PHANTOMJS, FIREFOX
+ #     timeout=30,  # Request timeout
+ #     window_size=(1024, 800),  # Window size
+ #     executable_path=None,  # Browser path; defaults to the system default
+ #     render_time=0,  # Render time: wait this long after opening the page before grabbing the source
+ #     custom_argument=[
+ #         "--ignore-certificate-errors",
+ #         "--disable-blink-features=AutomationControlled",
+ #     ],  # Custom browser arguments
+ #     xhr_url_regexes=None,  # Intercept xhr endpoints; regex supported, list type
+ #     auto_install_driver=True,  # Auto-download the browser driver; supports chrome and firefox
+ #     download_path=None,  # Path for downloaded files
+ #     use_stealth_js=False,  # Use stealth.min.js to hide browser fingerprints
+ # )
+ #
+ # PLAYWRIGHT = dict(
+ #     user_agent=None,  # String, or zero-argument function returning a user_agent
+ #     proxy=None,  # xxx.xxx.xxx.xxx:xxxx, or zero-argument function returning a proxy address
+ #     headless=False,  # Whether to run headless
+ #     driver_type="chromium",  # chromium, firefox, webkit
+ #     timeout=30,  # Request timeout
+ #     window_size=(1024, 800),  # Window size
+ #     executable_path=None,  # Browser path; defaults to the system default
+ #     download_path=None,  # Path for downloaded files
+ #     render_time=0,  # Render time: wait this long after opening the page before grabbing the source
+ #     wait_until="networkidle",  # Page-load event to wait for; one of "commit", "domcontentloaded", "load", "networkidle"
+ #     use_stealth_js=False,  # Use stealth.min.js to hide browser fingerprints
+ #     page_on_event_callback=None,  # Callbacks for page.on() events, e.g. page_on_event_callback={"dialog": lambda dialog: dialog.accept()}
+ #     storage_state_path=None,  # Path for saving browser state
+ #     url_regexes=None,  # Intercept endpoints; regex supported, list type
+ #     save_all=False,  # Whether to save all intercepted responses; used with url_regexes. When False, only the last interception is saved
+ # )
+ #
+ # # On crawler start, re-crawl failed requests
+ # RETRY_FAILED_REQUESTS = False
+ # # On crawler start, re-save items that failed to persist
+ # RETRY_FAILED_ITEMS = False
+ # # Save failed requests
+ # SAVE_FAILED_REQUEST = True
+ # # Request loss protection (a request not finished within REQUEST_LOST_TIMEOUT is re-dispatched and redone)
+ # REQUEST_LOST_TIMEOUT = 600  # 10 minutes
+ # # Network request timeout
+ # REQUEST_TIMEOUT = 22  # Timeout waiting for the server to respond; a float, or a (connect timeout, read timeout) tuple
+ # # Max number of items cached in the in-memory queue
+ # ITEM_MAX_CACHED_COUNT = 5000
+ # # Max number of items per insert batch
+ # ITEM_UPLOAD_BATCH_MAX_SIZE = 1000
+ # # Item insert interval
+ # ITEM_UPLOAD_INTERVAL = 1
+ # # Max number of tasks cached in the in-memory task queue; unlimited by default. Only effective for AirSpider.
+ # TASK_MAX_CACHED_SIZE = 0
+ #
+ # # Response cache, backed by redis. Given memory limits, recommended only while developing and debugging, to avoid a network request on every debug run
+ # RESPONSE_CACHED_ENABLE = False  # Whether to enable the response cache; recommended True for expensive data or data with changing requirements
+ # RESPONSE_CACHED_EXPIRE_TIME = 3600  # Cache TTL in seconds
+ # RESPONSE_CACHED_USED = False  # Whether to read from the cache; may be set True when re-collecting data
+ #
+ # # Proxy settings
+ # PROXY_EXTRACT_API = None  # Proxy extraction API; returned proxies are delimited by \r\n
+ # PROXY_ENABLE = True
+ # PROXY_MAX_FAILED_TIMES = 5  # Max proxy failures; past this the proxy is no longer used and is automatically removed
+ # PROXY_POOL = "feapder.network.proxy_pool.ProxyPool"  # Proxy pool
+ #
+ # # Random headers
+ # RANDOM_HEADERS = True
+ # # UserAgent type; supports 'chrome', 'opera', 'firefox', 'internetexplorer', 'safari', 'mobile'. Random if unspecified
+ # USER_AGENT_TYPE = "chrome"
+ # # Default user agent
+ # DEFAULT_USERAGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36"
+ # # Use a requests session
+ # USE_SESSION = False
+ #
+ # # Deduplication
+ # ITEM_FILTER_ENABLE = False  # Item dedup
+ # REQUEST_FILTER_ENABLE = False  # Request dedup
+ # ITEM_FILTER_SETTING = dict(
+ #     filter_type=1  # Permanent dedup (BloomFilter) = 1, in-memory dedup (MemoryFilter) = 2, expiring dedup (ExpireFilter) = 3, lightweight dedup (LiteFilter) = 4
+ # )
+ # REQUEST_FILTER_SETTING = dict(
+ #     filter_type=3,  # Permanent dedup (BloomFilter) = 1, in-memory dedup (MemoryFilter) = 2, expiring dedup (ExpireFilter) = 3, lightweight dedup (LiteFilter) = 4
+ #     expire_time=2592000,  # Expires after one month
+ # )
+ #
+ # # Alerts: DingTalk, Feishu, WeCom, and email are supported
+ # # DingTalk alerts
+ # DINGDING_WARNING_URL = ""  # DingTalk bot api
+ # DINGDING_WARNING_PHONE = ""  # Phone numbers of group members to @; lists are supported for multiple members
+ # DINGDING_WARNING_USER_ID = ""  # userIds of group members to @; lists are supported for multiple members
+ # DINGDING_WARNING_ALL = False  # Whether to notify everyone; defaults to False
+ # DINGDING_WARNING_SECRET = None  # Signing secret
+ # # Feishu alerts
+ # # https://open.feishu.cn/document/ukTMukTMukTM/ucTM5YjL3ETO24yNxkjN#e1cdee9f
+ # FEISHU_WARNING_URL = ""  # Feishu bot api
+ # FEISHU_WARNING_USER = None  # Recipient: {"open_id":"ou_xxxxx", "name":"xxxx"} or [{"open_id":"ou_xxxxx", "name":"xxxx"}]
+ # FEISHU_WARNING_ALL = False  # Whether to notify everyone; defaults to False
+ # # Email alerts
+ # EMAIL_SENDER = ""  # Sender
+ # EMAIL_PASSWORD = ""  # Authorization code
+ # EMAIL_RECEIVER = ""  # Recipients; lists are supported for multiple addresses
+ # EMAIL_SMTPSERVER = "smtp.163.com"  # Mail server; defaults to 163 mail
+ # # WeCom alerts
+ # WECHAT_WARNING_URL = ""  # WeCom bot api
+ # WECHAT_WARNING_PHONE = ""  # Recipients to @ in the group; lists are supported for multiple members
+ # WECHAT_WARNING_ALL = False  # Whether to notify everyone; defaults to False
+ # # Interval
+ # WARNING_INTERVAL = 3600  # Interval between identical alerts, to prevent flooding; 0 disables dedup
+ # WARNING_LEVEL = "DEBUG"  # Alert level: DEBUG / INFO / ERROR
+ # WARNING_FAILED_COUNT = 1000  # Alert when the number of failed tasks exceeds WARNING_FAILED_COUNT
+ #
+ # LOG_NAME = os.path.basename(os.getcwd())
+ # LOG_PATH = "log/%s.log" % LOG_NAME  # Log file path
+ # LOG_LEVEL = "DEBUG"
+ # LOG_COLOR = True  # Whether logs are colored
+ # LOG_IS_WRITE_TO_CONSOLE = True  # Whether to print to the console
+ # LOG_IS_WRITE_TO_FILE = False  # Whether to write to a file
+ # LOG_MODE = "w"  # File write mode
+ # LOG_MAX_BYTES = 10 * 1024 * 1024  # Max bytes per log file
+ # LOG_BACKUP_COUNT = 20  # Number of log files to keep
+ # LOG_ENCODING = "utf8"  # Log file encoding
+ # OTHERS_LOG_LEVAL = "ERROR"  # Log level for third-party libraries
+ #
+ # # Switch the working directory to the project directory
+ # project_path = os.path.abspath(os.path.dirname(__file__))
+ # os.chdir(project_path)  # Switch the working directory
+ # sys.path.insert(0, project_path)
+ # print("Current working directory: " + os.getcwd())
crawlo/templates/project_template/spiders/__init__.py (file without changes)
crawlo/templates/spider_template.tmpl ADDED
@@ -0,0 +1,31 @@
+ # -*- coding: utf-8 -*-
+ """
+ Created on {DATE}
+ ---------
+ @summary:
+ ---------
+ @author: {USER}
+ """
+
+ import crawlo
+
+
+ class ${spider_name}(crawlo.Spider):
+     # Custom database settings; if the project has a setting.py file, this block can be removed
+     __custom_setting__ = dict(
+         REDISDB_IP_PORTS="localhost:6379", REDISDB_USER_PASS="", REDISDB_DB=0
+     )
+
+     def start_requests(self):
+         yield crawlo.Request("https://spidertools.cn")
+
+     def parse(self, request, response):
+         # Extract the site title
+         print(response.xpath("//title/text()").extract_first())
+         # Extract the site description
+         print(response.xpath("//meta[@name='description']/@content").extract_first())
+         print("Site URL: ", response.url)
+
+
+ if __name__ == "__main__":
+     ${spider_name}(redis_key="xxx:xxx").start()
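The scaffolding command that fills this template is not part of this diff (the console script references crawlo.commands, which does not appear in RECORD). A minimal sketch of how the placeholders could be filled, assuming ${spider_name} is string.Template syntax while {DATE} and {USER} are plain markers:

```python
from datetime import date
from getpass import getuser
from string import Template

def render_spider_template(tmpl_text: str, spider_name: str) -> str:
    # Fill the plain {DATE} / {USER} markers first, then let
    # string.Template substitute ${spider_name}.
    filled = tmpl_text.replace("{DATE}", date.today().isoformat())
    filled = filled.replace("{USER}", getuser())
    return Template(filled).substitute(spider_name=spider_name)

# e.g. render_spider_template(open("spider_template.tmpl").read(), "DemoSpider")
```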
crawlo/utils/__init__.py ADDED
@@ -0,0 +1,7 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ """
+ # @Time : 2025-02-05 13:57
+ # @Author : oscar
+ # @Desc : None
+ """
crawlo/utils/date_tools.py ADDED
@@ -0,0 +1,20 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ """
+ # @Time : 2025-05-17 10:20
+ # @Author : crawl-coder
+ # @Desc : Time utilities
+ """
+ from datetime import datetime
+
+
+ def now():
+     return datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+
+
+ def date_delta(start, end):
+     start = datetime.strptime(start, '%Y-%m-%d %H:%M:%S')
+     end = datetime.strptime(end, '%Y-%m-%d %H:%M:%S')
+     delta = end - start
+     seconds = delta.total_seconds()
+     return int(seconds)
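A quick usage sketch for the two helpers above; timestamps are exchanged as '%Y-%m-%d %H:%M:%S' strings:

```python
from crawlo.utils.date_tools import date_delta, now

start = now()                       # e.g. '2025-05-17 10:20:00'
# ... crawl for a while ...
elapsed = date_delta(start, now())  # whole seconds between the two timestamps
print(f"elapsed: {elapsed}s")
```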
crawlo/utils/func_tools.py ADDED
@@ -0,0 +1,22 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ from typing import Callable
+ from inspect import isgenerator, isasyncgen
+ from crawlo.exceptions import TransformTypeError
+
+
+ async def transform(func: Callable):
+     try:
+         if isgenerator(func):
+             for f in func:
+                 yield f
+         elif isasyncgen(func):
+             async for f in func:
+                 yield f
+         else:
+             raise TransformTypeError(
+                 f'callback return type error: {type(func)} must be `generator` or `async generator`'
+             )
+     except Exception as exp:
+         yield exp
+
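transform() normalizes a callback's output: sync and async generators are both consumed through one `async for`, and any exception raised inside the callback (or the type error for a non-generator) is yielded rather than propagated. A usage sketch:

```python
import asyncio
from crawlo.utils.func_tools import transform

def parse():                 # a sync-generator callback
    yield "item-1"
    yield "item-2"

async def main():
    # transform() takes the generator object itself, not the function.
    async for result in transform(parse()):
        print(result)        # exceptions raised inside parse() would show up here too

asyncio.run(main())
```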
crawlo/utils/log.py ADDED
@@ -0,0 +1,39 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ """
+ # @Time : 2024-04-11 09:03
+ # @Author : oscar
+ # @Desc : None
+ """
+ from logging import Formatter, StreamHandler, Logger, INFO
+
+ LOG_FORMAT = '%(asctime)s - [%(name)s] - %(levelname)s: %(message)s'
+
+
+ class LoggerManager(object):
+     logger_cache = {}
+
+     def __init__(self):
+         pass
+
+     @classmethod
+     def get_logger(cls, name: str = 'default', level=None, log_format: str = LOG_FORMAT):
+         key = (name, level)
+
+         def gen_logger():
+             log_formatter = Formatter(log_format)
+             handler = StreamHandler()
+             handler.setFormatter(log_formatter)
+             handler.setLevel(level or INFO)
+
+             _logger = Logger(name=name)
+             _logger.addHandler(handler)
+             _logger.setLevel(level or INFO)
+             cls.logger_cache[key] = _logger
+             return _logger
+
+         return cls.logger_cache.get(key, None) or gen_logger()
+
+
+ get_logger = LoggerManager.get_logger
+
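get_logger caches loggers under the (name, level) pair, so repeated calls with the same arguments return the same object. A usage sketch:

```python
import logging
from crawlo.utils.log import get_logger

log_a = get_logger(name="my.spider", level=logging.DEBUG)
log_b = get_logger(name="my.spider", level=logging.DEBUG)
assert log_a is log_b              # served from the (name, level) cache
log_a.debug("scheduled 3 requests")
```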
crawlo/utils/pqueue.py ADDED
@@ -0,0 +1,16 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ import asyncio
+ from asyncio import PriorityQueue, TimeoutError
+
+
+ class SpiderPriorityQueue(PriorityQueue):
+     def __init__(self, maxsize=0):
+         super(SpiderPriorityQueue, self).__init__(maxsize=maxsize)
+
+     async def get(self):
+         fut = super().get()
+         try:
+             return await asyncio.wait_for(fut, timeout=0.1)
+         except TimeoutError:
+             return None
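SpiderPriorityQueue.get() wraps asyncio.PriorityQueue.get() in a 0.1-second wait_for and returns None on timeout instead of blocking forever, presumably so a caller can poll the queue without stalling. A usage sketch:

```python
import asyncio
from crawlo.utils.pqueue import SpiderPriorityQueue

async def main():
    q = SpiderPriorityQueue()
    await q.put((0, "high-priority-request"))  # entries sort by priority first
    print(await q.get())   # (0, 'high-priority-request')
    print(await q.get())   # None: the queue is empty and get() timed out

asyncio.run(main())
```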
crawlo/utils/project.py ADDED
@@ -0,0 +1,58 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ import os
+ import sys
+ from importlib import import_module
+ from inspect import iscoroutinefunction
+ from typing import Callable
+
+ from crawlo.settings.setting_manager import SettingManager
+
+
+ def _get_closest(path='.'):
+     path = os.path.abspath(path)
+     return path
+
+
+ def _init_env():
+     closest = _get_closest()
+     if closest:
+         project_dir = os.path.dirname(closest)
+         sys.path.append(project_dir)
+
+
+ def get_settings(settings='settings'):
+     _settings = SettingManager()
+     _init_env()
+     _settings.set_settings(settings)
+     return _settings
+
+
+ def merge_settings(spider, settings):
+     if hasattr(spider, 'custom_settings'):
+         custom_settings = getattr(spider, 'custom_settings')
+         settings.update_attributes(custom_settings)
+
+
+ def load_class(_path):
+     if not isinstance(_path, str):
+         if callable(_path):
+             return _path
+         else:
+             raise TypeError(f"args expect str or object, got {_path}")
+
+     module_name, class_name = _path.rsplit('.', 1)
+     module = import_module(module_name)
+
+     try:
+         cls = getattr(module, class_name)
+     except AttributeError:
+         raise NameError(f"Module {module_name!r} has no class named {class_name!r}")
+     return cls
+
+
+ async def common_call(func: Callable, *args, **kwargs):
+     if iscoroutinefunction(func):
+         return await func(*args, **kwargs)
+     else:
+         return func(*args, **kwargs)
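load_class resolves a dotted path to the object it names (or passes a callable through unchanged), and common_call transparently awaits coroutine functions while calling plain functions directly. A sketch using standard-library stand-ins:

```python
import asyncio
from crawlo.utils.project import common_call, load_class

print(load_class("collections.OrderedDict"))  # <class 'collections.OrderedDict'>

async def main():
    def plain(x):
        return x * 2
    async def coro(x):
        return x * 3
    print(await common_call(plain, 2))  # 4: called directly
    print(await common_call(coro, 2))   # 6: awaited automatically

asyncio.run(main())
```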
crawlo/utils/system.py ADDED
@@ -0,0 +1,11 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ import platform
+
+ system_name = platform.system().lower()
+ if system_name == 'windows':
+     import asyncio
+     asyncio.set_event_loop_policy(
+         asyncio.WindowsSelectorEventLoopPolicy()
+     )
+
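Importing this module has the side effect of installing WindowsSelectorEventLoopPolicy on Windows, a common workaround because some async libraries expect a selector-based loop rather than the default proactor loop. A usage sketch; the import is a no-op on other platforms:

```python
import crawlo.utils.system  # noqa: F401  (side effect: selector loop policy on Windows)
import asyncio

async def main():
    await asyncio.sleep(0)

asyncio.run(main())  # the loop created here uses the selector policy on Windows
```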
crawlo-1.0.0.dist-info/METADATA ADDED
@@ -0,0 +1,36 @@
+ Metadata-Version: 2.4
+ Name: crawlo
+ Version: 1.0.0
+ Summary: feapder is a python crawler framework with async support
+ Home-page: https://github.com/crawl-coder/Crawlo.git
+ Author: crawl-coder
+ Author-email: crawlo@qq.com
+ License: MIT
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.6
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: aiohttp>=3.12.6
+ Requires-Dist: httpx>=0.28.1
+ Requires-Dist: DBUtils>=2.0
+ Requires-Dist: parsel>=1.10.0
+ Requires-Dist: pymysql>=1.1.1
+ Requires-Dist: ujson>=5.10.0
+ Provides-Extra: render
+ Requires-Dist: webdriver-manager>=4.0.0; extra == "render"
+ Requires-Dist: playwright; extra == "render"
+ Requires-Dist: selenium>=3.141.0; extra == "render"
+ Provides-Extra: all
+ Requires-Dist: bitarray>=1.5.3; extra == "all"
+ Requires-Dist: PyExecJS>=1.5.1; extra == "all"
+ Requires-Dist: pymongo>=3.10.1; extra == "all"
+ Requires-Dist: redis-py-cluster>=2.1.0; extra == "all"
+ Requires-Dist: webdriver-manager>=4.0.0; extra == "all"
+ Requires-Dist: playwright; extra == "all"
+ Requires-Dist: selenium>=3.141.0; extra == "all"
+ Dynamic: license-file
+
+ # Crawlo
+ An asynchronous, general-purpose crawler framework
crawlo-1.0.0.dist-info/RECORD ADDED
@@ -0,0 +1,59 @@
+ crawlo/__init__.py,sha256=yzg6DkuCNkz27Tn5YDdaboZ1Ah96FwV6eGNEDO48lxA,170
+ crawlo/__version__.py,sha256=j4YpqEiBQsl3obE5LH55xbuejIMjUwUNp-k4JXOjEFU,23
+ crawlo/crawler.py,sha256=lIx3-_TKXdCDhpfw4lYcizpyoclapjAHfGchBJ9DmTU,3679
+ crawlo/event.py,sha256=ZhoPW5CglCEuZNFEwviSCBIw0pT5O6jT98bqYrDFd3E,324
+ crawlo/exceptions.py,sha256=trxM2c0jw50QsGSoFAKC2RrKpapOFHQDq0wQuLWqmKE,980
+ crawlo/stats_collector.py,sha256=jhAW8k0SzjqelkpiWpfGmMw2DBkgTjpwnObqTNDOp6A,1286
+ crawlo/subscriber.py,sha256=4stxeXqNK6RB7oqo0wKJdUw2Ym3b6UBMhZ4pRrjSMEU,1001
+ crawlo/task_manager.py,sha256=D9m-nqnGj-FZPtGk4CdwZX3Gw7IWyYvTS7CHpRGWc_w,748
+ crawlo/core/__init__.py,sha256=JYSAn15r8yWgRK_Nc69t_8tZCyb70MiPZKssA8wrYz0,43
+ crawlo/core/engine.py,sha256=mQfzY4Bm0Ysb5bPjoH3lMVFbn9WJuQ1UjBTReuOljzQ,5873
+ crawlo/core/processor.py,sha256=5RQPfffE8AnKnuajL9J2oc7QAeoSjATxwdxRU3_EoKM,1183
+ crawlo/core/scheduler.py,sha256=2z8qzZmj6zn9w5pE1yux6Vi7fDMq8Cto6n9Wyyo_DP8,1074
+ crawlo/downloader/__init__.py,sha256=72u2Hef4HaMfs9VCqEjbMtiaRXbaXmgNiJn6qy09LHs,2384
+ crawlo/downloader/aiohttp_downloader.py,sha256=4C2BDloKzwss16kfD7tH0WPugPbSSFxl-5-_DLWB0vM,3676
+ crawlo/downloader/httpx_downloader.py,sha256=ra6Ae_lv8pNyvLzPQYBgTNuBdMVBYi86kNt2OdZlcSo,1704
+ crawlo/extension/__init__.py,sha256=O2BVK1U3WwmurZb-PaYVz3g1tZ_iYUjCwilmUKf6844,1170
+ crawlo/extension/log_interval.py,sha256=FOWeTOuWtOpCz2UPV5F_--QIa8yomltSpjxbw3F7bkU,1971
+ crawlo/extension/log_stats.py,sha256=ZIIB6WKvYSxDIw7Mr52QkOSiX-tMGWU8Ifpd1EXiTUY,1677
+ crawlo/items/__init__.py,sha256=Ir04T486XuJJ-OCItxqO-78Jyt7B8JGLBsrgCO605PU,535
+ crawlo/items/items.py,sha256=7w0uojKu1GzY5SDfrlJtAhqVv6alHQjwkBkthSoJ-04,2856
+ crawlo/middleware/__init__.py,sha256=PSwpRLdBUopaQzBp1S0zK_TZbrRagQ4yzvgyLy4tBk8,570
+ crawlo/middleware/default_header.py,sha256=OVW4vpRPp3Y6qYXtiEYlGqVjCYcbuv1Iecc7zEgwCsI,1099
+ crawlo/middleware/download_delay.py,sha256=P2eyAJXwdLdC4yYuLhvKZVa1b5YQvQD0GpsR8aDW8-8,994
+ crawlo/middleware/middleware_manager.py,sha256=T4axTY89Z0BOwaWDWcUTABeDNTvyPFiyrbwj-H4sbSA,6629
+ crawlo/middleware/request_ignore.py,sha256=jdybWFVXuA5YsAPfZJFzLTWkYhEAewNgxuhFqczPW9M,1027
+ crawlo/middleware/response_code.py,sha256=vgXWv3mMu_v9URvhKA9myIFH4u6L4EwNme80wL4DCGc,677
+ crawlo/middleware/response_filter.py,sha256=O2gkV_Yjart8kmmXTGzrtZnb_Uuefap4uL2Cu01iRs4,863
+ crawlo/middleware/retry.py,sha256=sCRO5uh9cG8Ui3PODJEg_6PlxDD_lAoopaF2P7u6bUo,3148
+ crawlo/network/__init__.py,sha256=DVz1JpasjxCgOlXvm76gz-S18OXr4emG_J39yi5iVuA,130
+ crawlo/network/request.py,sha256=YokEk6fEBQF2ii-Oe18ZzVc262Ih6KcUZPLOpxcs6Mg,1474
+ crawlo/network/response.py,sha256=QikuOCgxS5yMh4Uh0QuMpqWfyR78vossCs-Va-sQ2YE,2993
+ crawlo/pipelines/__init__.py,sha256=IbXJ6B8LqxVVjeLNgL_12AxV6zbV8hNRQxAfMLjjSaw,273
+ crawlo/pipelines/console_pipeline.py,sha256=1zorg1xrIiCctiTkd38jm7Q-miKMnRFnJBjv7Tg0gyo,537
+ crawlo/pipelines/mongo_pipeline.py,sha256=DfXD10Ee-3q685EdS6du3Nn-f0EaBKJgdgsVlpcYhd8,67
+ crawlo/pipelines/mysql_pipeline.py,sha256=QAZToJAmy3xyTLOh7DkwCfxMDWQTLiTOLdfvZhkMP6E,67
+ crawlo/pipelines/pipeline_manager.py,sha256=FvpMuHwB5DWGzPsy1mZFdcyxzPN2mVL2aivUWlzrHx4,2183
+ crawlo/settings/__init__.py,sha256=NgYFLfk_Bw7h6KSoepJn_lMBSqVbCHebjKxaE3_eMgw,130
+ crawlo/settings/default_settings.py,sha256=4J8Kzc3dk09ogy5_FeaGA1KHUzHPMYg9LFIVFIrhuXY,979
+ crawlo/settings/setting_manager.py,sha256=4xXOzKwZCgAp8ybwvVcs2R--CsOD7c6dBIkj6DJHB3c,2998
+ crawlo/spider/__init__.py,sha256=9haJYfxX4Gx2iV3KK5Fry2fO9R-gEmAkR_pQjZTIErE,905
+ crawlo/templates/item_template.tmpl,sha256=0bGFnlwJRqstxMNEj1H_pEICybwoueRhs31QaDPXrS0,372
+ crawlo/templates/spider_template.tmpl,sha256=JzphuA87Yl_F1xR9zOIi_ZSazyT8eSNPxYYPMv3Uiko,835
+ crawlo/templates/project_template/main.py,sha256=BcCP294ycCPsHi_AMN7OAJtcrLvQdf91meH93PqbQgs,626
+ crawlo/templates/project_template/setting.py,sha256=Ce4nMbrdhL1ioRdTcB0vV_vK_50cfnwVqSvt49QsNkA,9395
+ crawlo/templates/project_template/items/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ crawlo/templates/project_template/spiders/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ crawlo/utils/__init__.py,sha256=XCYumI8wJ1jU_Myn_K0LT-LVygPDUCdETCbXM3EWvlo,130
+ crawlo/utils/date_tools.py,sha256=bW3j9LxxuAUwykzuzaOYR3iY8U0g7wBPZL1ELf4lXVo,482
+ crawlo/utils/func_tools.py,sha256=sWnoa2SDAMK2NBdoIO2o3Ew6SqeJXcSNEzmWZUJcE6o,616
+ crawlo/utils/log.py,sha256=LU0J3boPCL-Kynx3wR_CAryRgScNmPPn4pBitLrrsX4,1028
+ crawlo/utils/pqueue.py,sha256=W1n23t9roVvDB84jQ4j7sk_Z4FoD8q3LbRRbH4B6Ej4,446
+ crawlo/utils/project.py,sha256=JHMV7SLDWMaPCwuwVY9KQqvu2VT1ZlXEQvpIzzNaSeo,1506
+ crawlo/utils/system.py,sha256=24zGmtHNhDFMGVo7ftMV-Pqg6_5d63zsyNey9udvJJk,248
+ crawlo-1.0.0.dist-info/licenses/LICENSE,sha256=f96mrub4oLJnuMcNwMjHWMksci5brQ2a2Fu0R7YEgnk,1125
+ crawlo-1.0.0.dist-info/METADATA,sha256=wS6yr3avxrdzm4x4WPN7fBKgO5jobpQ8UpvrCUC1aQw,1266
+ crawlo-1.0.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ crawlo-1.0.0.dist-info/entry_points.txt,sha256=GD9PBhKQN83EaxPYtz7NhcGeZeh3bdr2jWbTixOs-lw,59
+ crawlo-1.0.0.dist-info/top_level.txt,sha256=Dwuv-Y1aGSJD3mjFrCdNGQ8EHroMj7RgVcxDdcczx4k,7
+ crawlo-1.0.0.dist-info/RECORD,,
crawlo-1.0.0.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: setuptools (80.9.0)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
crawlo-1.0.0.dist-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
+ [console_scripts]
+ crawlo = crawlo.commands.cmdline:execute
crawlo-1.0.0.dist-info/licenses/LICENSE ADDED
@@ -0,0 +1,23 @@
+ MIT License
+
+ Modifications:
+
+ Copyright (c) 2020 crawl-coder <2251018029@qq.com>
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
crawlo-1.0.0.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ crawlo