crawlo-1.1.2-py3-none-any.whl → crawlo-1.1.3-py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release.
- crawlo/__init__.py +34 -34
- crawlo/__version__.py +1 -1
- crawlo/cli.py +40 -40
- crawlo/commands/__init__.py +13 -13
- crawlo/commands/check.py +594 -594
- crawlo/commands/genspider.py +151 -151
- crawlo/commands/list.py +155 -155
- crawlo/commands/run.py +285 -285
- crawlo/commands/startproject.py +196 -196
- crawlo/commands/stats.py +188 -188
- crawlo/commands/utils.py +186 -186
- crawlo/config.py +279 -279
- crawlo/core/__init__.py +2 -2
- crawlo/core/engine.py +171 -171
- crawlo/core/enhanced_engine.py +189 -189
- crawlo/core/processor.py +40 -40
- crawlo/core/scheduler.py +166 -162
- crawlo/crawler.py +1027 -1027
- crawlo/downloader/__init__.py +242 -242
- crawlo/downloader/aiohttp_downloader.py +212 -212
- crawlo/downloader/cffi_downloader.py +251 -251
- crawlo/downloader/httpx_downloader.py +259 -257
- crawlo/event.py +11 -11
- crawlo/exceptions.py +82 -78
- crawlo/extension/__init__.py +31 -31
- crawlo/extension/log_interval.py +49 -49
- crawlo/extension/log_stats.py +44 -44
- crawlo/extension/logging_extension.py +34 -34
- crawlo/filters/__init__.py +154 -154
- crawlo/filters/aioredis_filter.py +242 -242
- crawlo/filters/memory_filter.py +269 -269
- crawlo/items/__init__.py +23 -23
- crawlo/items/base.py +21 -21
- crawlo/items/fields.py +53 -53
- crawlo/items/items.py +104 -104
- crawlo/middleware/__init__.py +21 -21
- crawlo/middleware/default_header.py +32 -32
- crawlo/middleware/download_delay.py +28 -28
- crawlo/middleware/middleware_manager.py +135 -135
- crawlo/middleware/proxy.py +248 -248
- crawlo/middleware/request_ignore.py +30 -30
- crawlo/middleware/response_code.py +18 -18
- crawlo/middleware/response_filter.py +26 -26
- crawlo/middleware/retry.py +125 -125
- crawlo/mode_manager.py +200 -200
- crawlo/network/__init__.py +21 -21
- crawlo/network/request.py +311 -311
- crawlo/network/response.py +271 -269
- crawlo/pipelines/__init__.py +22 -13
- crawlo/pipelines/bloom_dedup_pipeline.py +157 -0
- crawlo/pipelines/console_pipeline.py +39 -39
- crawlo/pipelines/csv_pipeline.py +316 -316
- crawlo/pipelines/database_dedup_pipeline.py +225 -0
- crawlo/pipelines/json_pipeline.py +218 -218
- crawlo/pipelines/memory_dedup_pipeline.py +116 -0
- crawlo/pipelines/mongo_pipeline.py +116 -116
- crawlo/pipelines/mysql_pipeline.py +195 -195
- crawlo/pipelines/pipeline_manager.py +56 -56
- crawlo/pipelines/redis_dedup_pipeline.py +163 -0
- crawlo/project.py +153 -153
- crawlo/queue/pqueue.py +37 -37
- crawlo/queue/queue_manager.py +307 -303
- crawlo/queue/redis_priority_queue.py +208 -191
- crawlo/settings/__init__.py +7 -7
- crawlo/settings/default_settings.py +245 -226
- crawlo/settings/setting_manager.py +99 -99
- crawlo/spider/__init__.py +639 -639
- crawlo/stats_collector.py +59 -59
- crawlo/subscriber.py +106 -106
- crawlo/task_manager.py +30 -30
- crawlo/templates/crawlo.cfg.tmpl +10 -10
- crawlo/templates/project/__init__.py.tmpl +3 -3
- crawlo/templates/project/items.py.tmpl +17 -17
- crawlo/templates/project/middlewares.py.tmpl +86 -86
- crawlo/templates/project/pipelines.py.tmpl +341 -335
- crawlo/templates/project/run.py.tmpl +251 -238
- crawlo/templates/project/settings.py.tmpl +250 -247
- crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
- crawlo/templates/spider/spider.py.tmpl +177 -177
- crawlo/utils/__init__.py +7 -7
- crawlo/utils/controlled_spider_mixin.py +439 -335
- crawlo/utils/date_tools.py +233 -233
- crawlo/utils/db_helper.py +343 -343
- crawlo/utils/func_tools.py +82 -82
- crawlo/utils/large_scale_config.py +286 -286
- crawlo/utils/large_scale_helper.py +343 -343
- crawlo/utils/log.py +128 -128
- crawlo/utils/queue_helper.py +175 -175
- crawlo/utils/request.py +267 -267
- crawlo/utils/request_serializer.py +219 -219
- crawlo/utils/spider_loader.py +62 -62
- crawlo/utils/system.py +11 -11
- crawlo/utils/tools.py +4 -4
- crawlo/utils/url.py +39 -39
- {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/METADATA +635 -567
- crawlo-1.1.3.dist-info/RECORD +113 -0
- examples/__init__.py +7 -7
- examples/controlled_spider_example.py +205 -0
- tests/__init__.py +7 -7
- tests/test_final_validation.py +153 -153
- tests/test_proxy_health_check.py +32 -32
- tests/test_proxy_middleware_integration.py +136 -136
- tests/test_proxy_providers.py +56 -56
- tests/test_proxy_stats.py +19 -19
- tests/test_proxy_strategies.py +59 -59
- tests/test_redis_config.py +28 -28
- tests/test_redis_queue.py +224 -224
- tests/test_request_serialization.py +70 -70
- tests/test_scheduler.py +241 -241
- crawlo-1.1.2.dist-info/RECORD +0 -108
- {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/WHEEL +0 -0
- {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/entry_points.txt +0 -0
- {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/top_level.txt +0 -0
crawlo/templates/spider/spider.py.tmpl
CHANGED

@@ -1,178 +1,178 @@
All 177 content lines are removed and re-added with identical rendered text; only line 178 (`    # self.save_cookies()`) is shared context. The visible text is therefore unchanged; the underlying difference is most likely whitespace or line endings. The template in full:

# -*- coding: UTF-8 -*-
"""
{{project_name}}.spiders.{{spider_name}}
=======================================
Spider generated by the `crawlo genspider` command.
Built on the Crawlo framework; supports asynchronous concurrency, distributed crawling, and more.

Usage example:
    crawlo crawl {{spider_name}}
"""

from crawlo.spider import Spider
from crawlo import Request
# from {{project_name}}.items import {{item_class}}  # Optional: import the item class


class {{class_name}}(Spider):
    """
    Spider: {{spider_name}}

    Features:
    - Concurrent crawling
    - Automatic deduplication
    - Error retry
    - Data pipeline processing
    """
    name = '{{spider_name}}'
    allowed_domains = ['{{domain}}']
    start_urls = ['https://{{domain}}/']

    # Advanced settings (optional)
    # custom_settings = {
    #     'DOWNLOAD_DELAY': 2.0,
    #     'CONCURRENCY': 4,
    #     'RETRY_HTTP_CODES': [500, 502, 503, 504, 408, 429],
    # }

    def start_requests(self):
        """
        Generate the initial requests.

        Supports custom headers, proxies, priorities, and more.
        """
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
        }

        for url in self.start_urls:
            yield Request(
                url=url,
                callback=self.parse,
                headers=headers,
                # meta={'proxy': 'http://proxy.example.com:8080'},  # custom proxy
                # priority=10,  # request priority (higher number = higher priority)
            )

    def parse(self, response):
        """
        Main parsing method for responses.

        Args:
            response: the response object, containing page content and metadata

        Yields:
            Request: new request objects (for deeper crawling)
            Item: item objects (for data storage)
        """
        self.logger.info(f'Parsing page: {response.url}')

        # ================== Data extraction example ==================

        # Extract data into an Item
        # item = {{item_class}}()
        # item['title'] = response.xpath('//title/text()').get(default='')
        # item['url'] = response.url
        # item['content'] = response.xpath('//div[@class="content"]//text()').getall()
        # yield item

        # Or yield a plain dict (for simple data)
        yield {
            'title': response.xpath('//title/text()').get(default=''),
            'url': response.url,
            'status_code': response.status_code,
            # 'description': response.xpath('//meta[@name="description"]/@content').get(),
            # 'keywords': response.xpath('//meta[@name="keywords"]/@content').get(),
        }

        # ================== Link extraction example ==================

        # Extract and follow links
        # links = response.xpath('//a/@href').getall()
        # for link in links:
        #     # Skip non-navigable links
        #     if link and not link.startswith(('javascript:', 'mailto:', '#')):
        #         yield response.follow(
        #             link,
        #             callback=self.parse_detail,  # or self.parse to keep recursing
        #             meta={'parent_url': response.url}  # pass parent-page info along
        #         )

        # Extract links with CSS selectors
        # for link in response.css('a.item-link::attr(href)').getall():
        #     yield response.follow(link, callback=self.parse_detail)

        # ================== Pagination example ==================

        # Follow a "next page" link
        # next_page = response.xpath('//a[@class="next"]/@href').get()
        # if next_page:
        #     yield response.follow(next_page, callback=self.parse)

        # Numeric pagination
        # current_page = int(response.meta.get('page', 1))
        # max_pages = 100  # maximum number of pages
        # if current_page < max_pages:
        #     next_url = f'https://{{domain}}/page/{current_page + 1}'
        #     yield Request(
        #         url=next_url,
        #         callback=self.parse,
        #         meta={'page': current_page + 1}
        #     )

    def parse_detail(self, response):
        """
        Parse a detail page (optional).

        Handles detail pages reached from list pages.
        """
        self.logger.info(f'Parsing detail page: {response.url}')

        # parent_url = response.meta.get('parent_url', '')
        #
        # yield {
        #     'title': response.xpath('//h1/text()').get(default=''),
        #     'content': '\n'.join(response.xpath('//div[@class="content"]//text()').getall()),
        #     'url': response.url,
        #     'parent_url': parent_url,
        #     'publish_time': response.xpath('//time/@datetime').get(),
        # }

        pass

    def parse_error(self, failure):
        """
        Handle a failed request (optional).

        Called when a request fails.
        """
        self.logger.error(f'Request failed: {failure.request.url} - {failure.value}')

        # Optionally retry, or record the failure
        # yield {
        #     'error_url': failure.request.url,
        #     'error_message': str(failure.value),
        #     'error_type': failure.type.__name__,
        # }

    def spider_opened(self, spider):
        """
        Callback invoked when the spider starts (optional).
        """
        self.logger.info(f'Spider {spider.name} started')

        # Initialization, e.g. open database connections or load config
        # self.database = self.connect_database()
        # self.cookies = self.load_cookies()

    def spider_closed(self, spider, reason):
        """
        Callback invoked when the spider closes (optional).
        """
        self.logger.info(f'Spider {spider.name} closed, reason: {reason}')

        # Cleanup, e.g. close database connections or save state
        # if hasattr(self, 'database'):
        #     self.database.close()
        #     self.save_cookies()
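For orientation, here is a minimal sketch of what a rendered spider might look like once the placeholders are filled in. It relies only on the API the template itself demonstrates (`Spider`, `Request`, `response.xpath`, `response.follow`, dict yields); the spider name, domain, and selectors are hypothetical, not taken from the package. Per the template's docstring, such a spider would be run with `crawlo crawl news`.

from crawlo.spider import Spider


class NewsSpider(Spider):
    # Hypothetical values standing in for {{spider_name}}, {{domain}}, etc.
    name = 'news'
    allowed_domains = ['example.com']
    start_urls = ['https://example.com/']

    def parse(self, response):
        # Yield a dict per page, mirroring the template's dict-yield style.
        yield {
            'title': response.xpath('//title/text()').get(default=''),
            'url': response.url,
        }
        # Follow the "next page" link, as in the template's pagination example.
        next_page = response.xpath('//a[@class="next"]/@href').get()
        if next_page:
            yield response.follow(next_page, callback=self.parse)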
crawlo/utils/__init__.py
CHANGED
@@ -1,7 +1,7 @@
Here too, all seven lines are removed and re-added with identical rendered text:

#!/usr/bin/python
# -*- coding:UTF-8 -*-
"""
# @Time : 2025-02-05 13:57
# @Author : oscar
# @Desc : None
"""