crawlo-1.1.1-py3-none-any.whl → crawlo-1.1.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crawlo might be problematic.

Files changed (68)
  1. crawlo/__init__.py +2 -1
  2. crawlo/__version__.py +1 -1
  3. crawlo/commands/genspider.py +68 -42
  4. crawlo/commands/list.py +102 -93
  5. crawlo/commands/startproject.py +89 -4
  6. crawlo/commands/utils.py +187 -0
  7. crawlo/config.py +280 -0
  8. crawlo/core/engine.py +16 -3
  9. crawlo/core/enhanced_engine.py +190 -0
  10. crawlo/core/scheduler.py +113 -8
  11. crawlo/crawler.py +840 -307
  12. crawlo/downloader/__init__.py +181 -17
  13. crawlo/downloader/aiohttp_downloader.py +15 -2
  14. crawlo/downloader/cffi_downloader.py +11 -1
  15. crawlo/downloader/httpx_downloader.py +14 -3
  16. crawlo/filters/__init__.py +122 -5
  17. crawlo/filters/aioredis_filter.py +128 -36
  18. crawlo/filters/memory_filter.py +99 -32
  19. crawlo/middleware/proxy.py +11 -8
  20. crawlo/middleware/retry.py +40 -5
  21. crawlo/mode_manager.py +201 -0
  22. crawlo/network/__init__.py +17 -3
  23. crawlo/network/request.py +118 -10
  24. crawlo/network/response.py +131 -28
  25. crawlo/pipelines/__init__.py +1 -1
  26. crawlo/pipelines/csv_pipeline.py +317 -0
  27. crawlo/pipelines/json_pipeline.py +219 -0
  28. crawlo/queue/__init__.py +0 -0
  29. crawlo/queue/pqueue.py +37 -0
  30. crawlo/queue/queue_manager.py +304 -0
  31. crawlo/queue/redis_priority_queue.py +192 -0
  32. crawlo/settings/default_settings.py +68 -9
  33. crawlo/spider/__init__.py +576 -66
  34. crawlo/task_manager.py +4 -1
  35. crawlo/templates/project/middlewares.py.tmpl +56 -45
  36. crawlo/templates/project/pipelines.py.tmpl +308 -36
  37. crawlo/templates/project/run.py.tmpl +239 -0
  38. crawlo/templates/project/settings.py.tmpl +211 -17
  39. crawlo/templates/spider/spider.py.tmpl +153 -7
  40. crawlo/utils/controlled_spider_mixin.py +336 -0
  41. crawlo/utils/large_scale_config.py +287 -0
  42. crawlo/utils/large_scale_helper.py +344 -0
  43. crawlo/utils/queue_helper.py +176 -0
  44. crawlo/utils/request_serializer.py +220 -0
  45. crawlo-1.1.2.dist-info/METADATA +567 -0
  46. {crawlo-1.1.1.dist-info → crawlo-1.1.2.dist-info}/RECORD +54 -46
  47. tests/test_final_validation.py +154 -0
  48. tests/test_redis_config.py +29 -0
  49. tests/test_redis_queue.py +225 -0
  50. tests/test_request_serialization.py +71 -0
  51. tests/test_scheduler.py +242 -0
  52. crawlo/pipelines/mysql_batch_pipline.py +0 -273
  53. crawlo/utils/pqueue.py +0 -174
  54. crawlo-1.1.1.dist-info/METADATA +0 -220
  55. examples/baidu_spider/__init__.py +0 -7
  56. examples/baidu_spider/demo.py +0 -94
  57. examples/baidu_spider/items.py +0 -46
  58. examples/baidu_spider/middleware.py +0 -49
  59. examples/baidu_spider/pipeline.py +0 -55
  60. examples/baidu_spider/run.py +0 -27
  61. examples/baidu_spider/settings.py +0 -121
  62. examples/baidu_spider/spiders/__init__.py +0 -7
  63. examples/baidu_spider/spiders/bai_du.py +0 -61
  64. examples/baidu_spider/spiders/miit.py +0 -159
  65. examples/baidu_spider/spiders/sina.py +0 -79
  66. {crawlo-1.1.1.dist-info → crawlo-1.1.2.dist-info}/WHEEL +0 -0
  67. {crawlo-1.1.1.dist-info → crawlo-1.1.2.dist-info}/entry_points.txt +0 -0
  68. {crawlo-1.1.1.dist-info → crawlo-1.1.2.dist-info}/top_level.txt +0 -0
@@ -1,61 +0,0 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- """
- # @Time : 2025-02-05 13:05
- # @Author : oscar
- # @Desc : None
- """
- import asyncio
- from crawlo import Request
- from crawlo.spider import Spider
-
- from items import BauDuItem
-
-
- class BaiDuSpider(Spider):
-     start_urls = ["https://www.baidu.com/", "https://www.baidu.com/"]
-
-     custom_settings = {
-         'CONCURRENCY': 1
-     }
-
-     name = "bai_du"
-
-     # headers = {
-     #     "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36"
-     # }
-     #
-     user_gent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36"
-
-     async def parse(self, response):
-         for i in range(5):
-             url = f"https://www.baidu.com"
-             # url = f"https://www.httpbin.org/404"
-             r = Request(url=url, callback=self.parse_page, dont_filter=True)
-             yield r
-
-     async def parse_page(self, response):
-         for i in range(5):
-             url = f"https://www.baidu.com"
-             meta = {'test': 'hhhh'}
-             r = Request(url=url, callback=self.parse_detail, meta=meta, dont_filter=False)
-             yield r
-
-     def parse_detail(self, response):
-         item = BauDuItem()
-         item['title'] = response.xpath('//title/text()').get()
-
-         item['url'] = response.url
-
-         yield item
-
-     async def spider_opened(self):
-         pass
-
-     async def spider_closed(self):
-         pass
-
-
- if __name__ == '__main__':
-     b = BaiDuSpider()
-     b.start_requests()
@@ -1,159 +0,0 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- """
- # @Time : 2025-08-22 14:00
- # @Author : oscar
- # @Desc : Crawl MIIT radio equipment approval records (supports the full 34,652 pages)
- """
-
- import json
- import asyncio
- import random
-
- from crawlo import Request
- from crawlo.spider import Spider
- from crawlo.utils.log import get_logger
- from crawlo.utils.date_tools import to_datetime
-
- # Import the predefined Item
- from examples.baidu_spider.items import MiitDeviceItem
-
-
- logger = get_logger(__name__)
-
-
- class MiitDeviceSpider(Spider):
-     name = 'miit_device'
-     allowed_domains = ['ythzxfw.miit.gov.cn']
-
-     # Field mapping table
-     FIELD_MAPPING = {
-         "articleField01": ("核准证编号", "approval_certificate_no"),
-         "articleField02": ("设备名称", "device_name"),
-         "articleField03": ("设备型号", "model_number"),
-         "articleField04": ("申请单位", "applicant"),
-         "articleField05": ("备注", "remarks"),
-         "articleField06": ("有效期", "validity_period"),
-         "articleField07": ("频率容限", "frequency_tolerance"),
-         "articleField08": ("频率范围", "frequency_range"),
-         "articleField09": ("发射功率", "transmission_power"),
-         "articleField10": ("占用带宽", "occupied_bandwidth"),
-         "articleField11": ("杂散发射限制", "spurious_emission_limit"),
-         "articleField12": ("发证日期", "issue_date"),
-         "articleField13": ("核准代码", "approval_code"),
-         "articleField14": ("CMIIT ID", "cmiit_id"),
-         "articleField15": ("调制方式", "modulation_scheme"),
-         "articleField16": ("技术体制/功能模块", "technology_module"),
-         "createTime": ("createTime", "create_time"),
-         "articleId": ("articleId", "article_id")
-     }
-
-     headers = {
-         "Accept": "application/json, text/plain, */*",
-         "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
-         "Authorization": "null",
-         "Cache-Control": "no-cache",
-         "Connection": "keep-alive",
-         "Content-Type": "application/json;charset=UTF-8",
-         "Origin": "https://ythzxfw.miit.gov.cn",
-         "Pragma": "no-cache",
-         "Referer": "https://ythzxfw.miit.gov.cn/oldyth/resultQuery",
-         "Sec-Fetch-Dest": "empty",
-         "Sec-Fetch-Mode": "cors",
-         "Sec-Fetch-Site": "same-origin",
-         "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
-         "sec-ch-ua": '"Not;A=Brand";v="99", "Google Chrome";v="139", "Chromium";v="139"',
-         "sec-ch-ua-mobile": "?0",
-         "sec-ch-ua-platform": '"macOS"'
-     }
-
-     cookies = {
-         "wzws_sessionid": "gjdjYmMyNYFkZjRiZjCgaKkOx4AyNDBlOjQ3ZTozMmUwOmQ5MmI6ZjFjZTphNWJiOjk5ZmU6OTU4OQ==",
-         "ariauseGraymode": "false",
-         "Hm_lvt_a73626d298a849004aacc34159f68abd": "1755909833",
-         "Hm_lpvt_a73626d298a849004aacc34159f68abd": "1755909833",
-         "HMACCOUNT": "6C5E4C6C47DC62FF"
-     }
-
-     # Pagination settings
-     start_page = 1  # first page
-     end_page = 34652  # total number of pages
-     current_page = 1
-     page_size = 5  # records per page
-
-     # Delay between requests (seconds) to avoid getting blocked
-     min_delay = 1.5
-     max_delay = 3.0
-
-     def start_requests(self):
-         # Start from the first page
-         yield self.make_request(self.start_page)
-
-     def make_request(self, page):
-         """Build the request for a given page."""
-         data = {
-             "categoryId": "352",
-             "currentPage": page,
-             "pageSize": self.page_size,
-             "searchContent": ""
-         }
-         return Request(
-             method='POST',
-             url='https://ythzxfw.miit.gov.cn/oldyth/user-center/tbAppSearch/selectResult',
-             headers=self.headers,
-             cookies=self.cookies,
-             body=json.dumps(data, separators=(',', ':'), ensure_ascii=False),
-             callback=self.parse,
-             dont_filter=True,
-             meta={'page': page}  # record the current page number for logging and debugging
-         )
-
-     async def parse(self, response):
-         page = response.meta.get('page', 'unknown')
-         try:
-             json_data = response.json()
-             success = json_data.get("success")
-             code = json_data.get("code")
-
-             if not success or code != 200:
-                 logger.error(f"Page {page} request failed: code={code}, msg={json_data.get('msg')}")
-                 return
-
-             tb_app_article = json_data.get('params', {}).get('tbAppArticle', {})
-             records = tb_app_article.get('list', [])
-             total_count = tb_app_article.get('total', 0)  # total record count, e.g. 173256
-
-             logger.info(f"✅ Page {page} parsed successfully, {len(records)} records on this page. Total: {total_count}")
-
-             for raw_item in records:
-                 item = MiitDeviceItem()
-                 for field_key, (chinese_name, english_field) in self.FIELD_MAPPING.items():
-                     value = raw_item.get(field_key)
-                     if english_field == 'issue_date' and value:
-                         value = to_datetime(value.split()[0])
-                     item[english_field] = value
-                 yield item
-
-             # ✅ Core fix: compute the real total page count from total_count and page_size
-             # Note: round up, e.g. 173256 / 5 = 34651.2, so there should be 34652 pages
-             import math
-             calculated_total_pages = math.ceil(total_count / self.page_size)
-
-             # Use calculated_total_pages to decide whether to keep paginating
-             next_page = page + 1
-             if next_page <= calculated_total_pages:
-                 delay = random.uniform(self.min_delay, self.max_delay)
-                 logger.debug(f"Waiting {delay:.2f}s before requesting page {next_page}...")
-                 await asyncio.sleep(delay)
-                 yield self.make_request(next_page)
-             else:
-                 logger.info(f"🎉 Crawl complete! Reached the last page: {calculated_total_pages}")
-
-         except Exception as e:
-             logger.error(f"❌ Failed to parse page {page}: {e}, response: {response.text[:500]}...")
-
-     async def spider_opened(self):
-         logger.info(f"MiitDeviceSpider started, about to crawl pages {self.start_page} to {self.end_page}...")
-
-     async def spider_closed(self):
-         logger.info("MiitDeviceSpider finished.")
@@ -1,79 +0,0 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- """
- # @Time : 2025-02-05 13:05
- # @Author : oscar
- # @Desc : None
- """
- import time
-
- from crawlo import Request
- from crawlo.spider import Spider
- from crawlo.utils.date_tools import to_datetime
-
- from examples.baidu_spider.items import ArticleItem
-
-
- class SinaSpider(Spider):
-     # Get the current timestamp and subtract 10 minutes (600 seconds)
-     current_time_minus_10min = int(time.time()) - 6000
-     # Build the URL
-     url = f'https://news.10jqka.com.cn/tapp/news/push/stock/?page=1&tag=&track=website&ctime={current_time_minus_10min}'
-
-     start_urls = [url]
-     name = 'sina'
-     # mysql_table = 'news_10jqka'
-
-     allowed_domains = ['*']
-
-     def start_requests(self):
-         for url in self.start_urls:
-             yield Request(url=url, callback=self.parse, dont_filter=True)
-
-     async def parse(self, response):
-         jsonp_str = response.json()
-         rows = jsonp_str.get('data', {}).get('list', [])
-         for row in rows:
-             article_id = row.get('id')
-             title = row.get('title')
-             digest = row.get('digest')
-             short = row.get('short')
-             detail_url = row.get('url')
-             tag = row.get('tag')
-             ctime = row.get('ctime')
-             source = row.get('source')
-             meta = {
-                 'article_id': article_id,
-                 'title': title,
-                 'digest': digest,
-                 'short': short,
-                 'detail_url': detail_url,
-                 'source': source,
-                 'tag': tag,
-                 'ctime': to_datetime(int(ctime))
-             }
-
-             yield Request(url=detail_url, callback=self.parse_detail, encoding='gbk', meta=meta)
-
-     @staticmethod
-     async def parse_detail(response):
-         item = ArticleItem()
-         meta = response.meta
-         content = ''.join(response.xpath('//*[@id="contentApp"]/p/text()').extract()).strip()
-         ctime = meta.get('ctime')
-         item['article_id'] = meta.get('article_id')
-         item['title'] = meta.get('title')
-         item['digest'] = content
-         item['short'] = meta.get('short')
-         item['url'] = meta.get('detail_url')
-         item['tag'] = meta.get('tag').strip()
-         item['ctime'] = to_datetime(ctime)
-         item['source'] = meta.get('source')
-
-         yield item
-
-     async def spider_opened(self):
-         pass
-
-     async def spider_closed(self):
-         pass
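
Both removed spiders hand listing-page fields to a detail callback through Request meta. The sketch below isolates that pattern, restricted to the Request/Spider surface visible in this diff (callback=, meta=, response.meta); the class name, URL, and the plain dict yielded at the end are illustrative placeholders rather than part of the package:

    from crawlo import Request
    from crawlo.spider import Spider


    class ListDetailSketch(Spider):  # hypothetical spider, for illustration only
        name = 'list_detail_sketch'
        start_urls = ['https://example.com/list']  # placeholder URL

        async def parse(self, response):
            for row in response.json().get('data', {}).get('list', []):
                # Stash listing fields in meta so the detail callback can read them back.
                meta = {'article_id': row.get('id'), 'title': row.get('title')}
                yield Request(url=row.get('url'), callback=self.parse_detail, meta=meta)

        async def parse_detail(self, response):
            # The removed examples populate an Item subclass here; a plain dict stands in
            # only to keep the sketch self-contained.
            yield {
                'article_id': response.meta.get('article_id'),
                'title': response.meta.get('title'),
                'url': response.url,
            }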
File without changes