crawlo 1.0.4__py3-none-any.whl → 1.0.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (112)
  1. crawlo/__init__.py +25 -9
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +41 -0
  4. crawlo/commands/__init__.py +10 -0
  5. crawlo/commands/genspider.py +111 -0
  6. crawlo/commands/run.py +149 -0
  7. crawlo/commands/startproject.py +101 -0
  8. crawlo/core/__init__.py +2 -2
  9. crawlo/core/engine.py +158 -158
  10. crawlo/core/processor.py +40 -40
  11. crawlo/core/scheduler.py +57 -57
  12. crawlo/crawler.py +219 -242
  13. crawlo/downloader/__init__.py +78 -78
  14. crawlo/downloader/aiohttp_downloader.py +200 -259
  15. crawlo/downloader/cffi_downloader.py +277 -0
  16. crawlo/downloader/httpx_downloader.py +246 -187
  17. crawlo/event.py +11 -11
  18. crawlo/exceptions.py +78 -64
  19. crawlo/extension/__init__.py +31 -31
  20. crawlo/extension/log_interval.py +49 -49
  21. crawlo/extension/log_stats.py +44 -44
  22. crawlo/extension/logging_extension.py +35 -0
  23. crawlo/filters/__init__.py +37 -37
  24. crawlo/filters/aioredis_filter.py +150 -150
  25. crawlo/filters/memory_filter.py +202 -202
  26. crawlo/items/__init__.py +22 -62
  27. crawlo/items/base.py +31 -0
  28. crawlo/items/fields.py +54 -0
  29. crawlo/items/items.py +105 -119
  30. crawlo/middleware/__init__.py +21 -21
  31. crawlo/middleware/default_header.py +32 -32
  32. crawlo/middleware/download_delay.py +28 -28
  33. crawlo/middleware/middleware_manager.py +135 -140
  34. crawlo/middleware/proxy.py +246 -0
  35. crawlo/middleware/request_ignore.py +30 -30
  36. crawlo/middleware/response_code.py +18 -18
  37. crawlo/middleware/response_filter.py +26 -26
  38. crawlo/middleware/retry.py +90 -90
  39. crawlo/network/__init__.py +7 -7
  40. crawlo/network/request.py +203 -204
  41. crawlo/network/response.py +166 -166
  42. crawlo/pipelines/__init__.py +13 -13
  43. crawlo/pipelines/console_pipeline.py +39 -39
  44. crawlo/pipelines/mongo_pipeline.py +116 -116
  45. crawlo/pipelines/mysql_batch_pipline.py +273 -134
  46. crawlo/pipelines/mysql_pipeline.py +195 -195
  47. crawlo/pipelines/pipeline_manager.py +56 -56
  48. crawlo/settings/__init__.py +7 -7
  49. crawlo/settings/default_settings.py +169 -94
  50. crawlo/settings/setting_manager.py +99 -99
  51. crawlo/spider/__init__.py +41 -36
  52. crawlo/stats_collector.py +59 -59
  53. crawlo/subscriber.py +106 -106
  54. crawlo/task_manager.py +27 -27
  55. crawlo/templates/crawlo.cfg.tmpl +11 -0
  56. crawlo/templates/project/__init__.py.tmpl +4 -0
  57. crawlo/templates/project/items.py.tmpl +18 -0
  58. crawlo/templates/project/middlewares.py.tmpl +76 -0
  59. crawlo/templates/project/pipelines.py.tmpl +64 -0
  60. crawlo/templates/project/settings.py.tmpl +54 -0
  61. crawlo/templates/project/spiders/__init__.py.tmpl +6 -0
  62. crawlo/templates/spider/spider.py.tmpl +32 -0
  63. crawlo/utils/__init__.py +7 -7
  64. crawlo/utils/concurrency_manager.py +124 -124
  65. crawlo/utils/date_tools.py +233 -177
  66. crawlo/utils/db_helper.py +344 -0
  67. crawlo/utils/func_tools.py +82 -82
  68. crawlo/utils/log.py +129 -39
  69. crawlo/utils/pqueue.py +173 -173
  70. crawlo/utils/project.py +199 -59
  71. crawlo/utils/request.py +267 -122
  72. crawlo/utils/spider_loader.py +63 -0
  73. crawlo/utils/system.py +11 -11
  74. crawlo/utils/tools.py +5 -303
  75. crawlo/utils/url.py +39 -39
  76. {crawlo-1.0.4.dist-info → crawlo-1.0.6.dist-info}/METADATA +49 -48
  77. crawlo-1.0.6.dist-info/RECORD +94 -0
  78. crawlo-1.0.6.dist-info/entry_points.txt +2 -0
  79. {crawlo-1.0.4.dist-info → crawlo-1.0.6.dist-info}/top_level.txt +1 -0
  80. examples/gxb/items.py +36 -0
  81. examples/gxb/run.py +16 -0
  82. examples/gxb/settings.py +72 -0
  83. examples/gxb/spider/__init__.py +0 -0
  84. examples/gxb/spider/miit_spider.py +180 -0
  85. examples/gxb/spider/telecom_device.py +129 -0
  86. tests/__init__.py +7 -7
  87. tests/test_proxy_health_check.py +33 -0
  88. tests/test_proxy_middleware_integration.py +137 -0
  89. tests/test_proxy_providers.py +57 -0
  90. tests/test_proxy_stats.py +20 -0
  91. tests/test_proxy_strategies.py +60 -0
  92. crawlo/downloader/playwright_downloader.py +0 -161
  93. crawlo/templates/item_template.tmpl +0 -22
  94. crawlo/templates/project_template/main.py +0 -33
  95. crawlo/templates/project_template/setting.py +0 -190
  96. crawlo/templates/spider_template.tmpl +0 -31
  97. crawlo-1.0.4.dist-info/RECORD +0 -79
  98. crawlo-1.0.4.dist-info/entry_points.txt +0 -2
  99. tests/baidu_spider/__init__.py +0 -7
  100. tests/baidu_spider/demo.py +0 -94
  101. tests/baidu_spider/items.py +0 -25
  102. tests/baidu_spider/middleware.py +0 -49
  103. tests/baidu_spider/pipeline.py +0 -55
  104. tests/baidu_spider/request_fingerprints.txt +0 -9
  105. tests/baidu_spider/run.py +0 -27
  106. tests/baidu_spider/settings.py +0 -80
  107. tests/baidu_spider/spiders/__init__.py +0 -7
  108. tests/baidu_spider/spiders/bai_du.py +0 -61
  109. tests/baidu_spider/spiders/sina.py +0 -79
  110. {crawlo-1.0.4.dist-info → crawlo-1.0.6.dist-info}/WHEEL +0 -0
  111. {crawlo/templates/project_template/items → examples}/__init__.py +0 -0
  112. {crawlo/templates/project_template/spiders → examples/gxb}/__init__.py +0 -0
crawlo/subscriber.py CHANGED
@@ -1,106 +1,106 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- import asyncio
- from collections import defaultdict
- from inspect import iscoroutinefunction
- from typing import Dict, Set, Callable, Coroutine, Any, TypeAlias, List
-
-
- class ReceiverTypeError(TypeError):
-     """Raised when a subscribed receiver is not a coroutine function."""
-     pass
-
-
- ReceiverCoroutine: TypeAlias = Callable[..., Coroutine[Any, Any, Any]]
-
-
- class Subscriber:
-     """
-     An async, coroutine-based publish/subscribe (Pub/Sub) implementation.
-
-     This class lets you register (subscribe) coroutine functions to listen for specific events,
-     and notifies all subscribers concurrently and asynchronously when an event fires.
-     """
-
-     def __init__(self):
-         """Initialize an empty subscriber registry."""
-         self._subscribers: Dict[str, Set[ReceiverCoroutine]] = defaultdict(set)
-
-     def subscribe(self, receiver: ReceiverCoroutine, *, event: str) -> None:
-         """
-         Subscribe to an event.
-
-         Args:
-             receiver: A coroutine function (e.g. async def my_func(...)).
-             event: The name of the event to subscribe to.
-
-         Raises:
-             ReceiverTypeError: If the given `receiver` is not a coroutine function.
-         """
-         if not iscoroutinefunction(receiver):
-             raise ReceiverTypeError(f"Receiver '{receiver.__qualname__}' must be a coroutine function.")
-         self._subscribers[event].add(receiver)
-
-     def unsubscribe(self, receiver: ReceiverCoroutine, *, event: str) -> None:
-         """
-         Unsubscribe from an event.
-
-         Silently ignored if the event or receiver does not exist.
-
-         Args:
-             receiver: The coroutine function to unsubscribe.
-             event: The event name.
-         """
-         if event in self._subscribers:
-             self._subscribers[event].discard(receiver)
-
-     async def notify(self, event: str, *args, **kwargs) -> List[Any]:
-         """
-         Asynchronously and concurrently notify all receivers subscribed to an event.
-
-         Waits for every subscriber task to finish before returning, collecting all results or exceptions.
-
-         Args:
-             event: The name of the event to fire.
-             *args: Positional arguments passed to the receivers.
-             **kwargs: Keyword arguments passed to the receivers.
-
-         Returns:
-             A list with each subscriber task's return value, or the exception caught during its execution.
-         """
-         receivers = self._subscribers.get(event, set())
-         if not receivers:
-             return []
-
-         tasks = [asyncio.create_task(receiver(*args, **kwargs)) for receiver in receivers]
-
-         # Run all tasks concurrently and return the list of results (including exceptions)
-         return await asyncio.gather(*tasks, return_exceptions=True)
-
- # #!/usr/bin/python
- # # -*- coding:UTF-8 -*-
- # import asyncio
- # from collections import defaultdict
- # from inspect import iscoroutinefunction
- # from typing import Dict, Set, Callable, Coroutine
- #
- # from crawlo.exceptions import ReceiverTypeError
- #
- #
- # class Subscriber:
- #
- #     def __init__(self):
- #         self._subscribers: Dict[str, Set[Callable[..., Coroutine]]] = defaultdict(set)
- #
- #     def subscribe(self, receiver: Callable[..., Coroutine], *, event: str) -> None:
- #         if not iscoroutinefunction(receiver):
- #             raise ReceiverTypeError(f"{receiver.__qualname__} must be a coroutine function")
- #         self._subscribers[event].add(receiver)
- #
- #     def unsubscribe(self, receiver: Callable[..., Coroutine], *, event: str) -> None:
- #         self._subscribers[event].discard(receiver)
- #
- #     async def notify(self, event: str, *args, **kwargs) -> None:
- #         for receiver in self._subscribers[event]:
- #             # must not await
- #             asyncio.create_task(receiver(*args, **kwargs))
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ import asyncio
+ from collections import defaultdict
+ from inspect import iscoroutinefunction
+ from typing import Dict, Set, Callable, Coroutine, Any, TypeAlias, List
+
+
+ class ReceiverTypeError(TypeError):
+     """Raised when a subscribed receiver is not a coroutine function."""
+     pass
+
+
+ ReceiverCoroutine: TypeAlias = Callable[..., Coroutine[Any, Any, Any]]
+
+
+ class Subscriber:
+     """
+     An async, coroutine-based publish/subscribe (Pub/Sub) implementation.
+
+     This class lets you register (subscribe) coroutine functions to listen for specific events,
+     and notifies all subscribers concurrently and asynchronously when an event fires.
+     """
+
+     def __init__(self):
+         """Initialize an empty subscriber registry."""
+         self._subscribers: Dict[str, Set[ReceiverCoroutine]] = defaultdict(set)
+
+     def subscribe(self, receiver: ReceiverCoroutine, *, event: str) -> None:
+         """
+         Subscribe to an event.
+
+         Args:
+             receiver: A coroutine function (e.g. async def my_func(...)).
+             event: The name of the event to subscribe to.
+
+         Raises:
+             ReceiverTypeError: If the given `receiver` is not a coroutine function.
+         """
+         if not iscoroutinefunction(receiver):
+             raise ReceiverTypeError(f"Receiver '{receiver.__qualname__}' must be a coroutine function.")
+         self._subscribers[event].add(receiver)
+
+     def unsubscribe(self, receiver: ReceiverCoroutine, *, event: str) -> None:
+         """
+         Unsubscribe from an event.
+
+         Silently ignored if the event or receiver does not exist.
+
+         Args:
+             receiver: The coroutine function to unsubscribe.
+             event: The event name.
+         """
+         if event in self._subscribers:
+             self._subscribers[event].discard(receiver)
+
+     async def notify(self, event: str, *args, **kwargs) -> List[Any]:
+         """
+         Asynchronously and concurrently notify all receivers subscribed to an event.
+
+         Waits for every subscriber task to finish before returning, collecting all results or exceptions.
+
+         Args:
+             event: The name of the event to fire.
+             *args: Positional arguments passed to the receivers.
+             **kwargs: Keyword arguments passed to the receivers.
+
+         Returns:
+             A list with each subscriber task's return value, or the exception caught during its execution.
+         """
+         receivers = self._subscribers.get(event, set())
+         if not receivers:
+             return []
+
+         tasks = [asyncio.create_task(receiver(*args, **kwargs)) for receiver in receivers]
+
+         # Run all tasks concurrently and return the list of results (including exceptions)
+         return await asyncio.gather(*tasks, return_exceptions=True)
+
+ # #!/usr/bin/python
+ # # -*- coding:UTF-8 -*-
+ # import asyncio
+ # from collections import defaultdict
+ # from inspect import iscoroutinefunction
+ # from typing import Dict, Set, Callable, Coroutine
+ #
+ # from crawlo.exceptions import ReceiverTypeError
+ #
+ #
+ # class Subscriber:
+ #
+ #     def __init__(self):
+ #         self._subscribers: Dict[str, Set[Callable[..., Coroutine]]] = defaultdict(set)
+ #
+ #     def subscribe(self, receiver: Callable[..., Coroutine], *, event: str) -> None:
+ #         if not iscoroutinefunction(receiver):
+ #             raise ReceiverTypeError(f"{receiver.__qualname__} must be a coroutine function")
+ #         self._subscribers[event].add(receiver)
+ #
+ #     def unsubscribe(self, receiver: Callable[..., Coroutine], *, event: str) -> None:
+ #         self._subscribers[event].discard(receiver)
+ #
+ #     async def notify(self, event: str, *args, **kwargs) -> None:
+ #         for receiver in self._subscribers[event]:
+ #             # must not await
+ #             asyncio.create_task(receiver(*args, **kwargs))
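The notable change in this module: `notify` now awaits all receivers with `asyncio.gather(..., return_exceptions=True)` instead of spawning tasks without awaiting them, as the commented-out legacy version at the bottom did. A minimal usage sketch of the new API (the event name and receiver below are illustrative, not crawlo built-ins):

    import asyncio
    from crawlo.subscriber import Subscriber

    async def on_spider_opened(spider_name: str) -> str:
        # Receivers must be coroutine functions; otherwise subscribe() raises ReceiverTypeError.
        return f"opened: {spider_name}"

    async def main() -> None:
        subscriber = Subscriber()
        subscriber.subscribe(on_spider_opened, event="spider_opened")
        # All receivers run concurrently; results and exceptions come back in one list.
        results = await subscriber.notify("spider_opened", "example")
        print(results)  # ['opened: example']

    asyncio.run(main())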
crawlo/task_manager.py CHANGED
@@ -1,27 +1,27 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- import asyncio
- from asyncio import Task, Future, Semaphore
- from typing import Set, Final
-
-
- class TaskManager:
-
-     def __init__(self, total_concurrency: int = 8):
-         self.current_task: Final[Set] = set()
-         self.semaphore: Semaphore = Semaphore(total_concurrency)
-
-     def create_task(self, coroutine) -> Task:
-         task = asyncio.create_task(coroutine)
-         self.current_task.add(task)
-
-         def done_callback(_future: Future) -> None:
-             self.current_task.remove(task)
-             self.semaphore.release()
-
-         task.add_done_callback(done_callback)
-
-         return task
-
-     def all_done(self) -> bool:
-         return len(self.current_task) == 0
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ import asyncio
+ from asyncio import Task, Future, Semaphore
+ from typing import Set, Final
+
+
+ class TaskManager:
+
+     def __init__(self, total_concurrency: int = 8):
+         self.current_task: Final[Set] = set()
+         self.semaphore: Semaphore = Semaphore(total_concurrency)
+
+     def create_task(self, coroutine) -> Task:
+         task = asyncio.create_task(coroutine)
+         self.current_task.add(task)
+
+         def done_callback(_future: Future) -> None:
+             self.current_task.remove(task)
+             self.semaphore.release()
+
+         task.add_done_callback(done_callback)
+
+         return task
+
+     def all_done(self) -> bool:
+         return len(self.current_task) == 0
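Note the asymmetry in `TaskManager`: the done-callback releases the semaphore, but `create_task` never acquires it, so the caller is expected to `acquire()` before scheduling (presumably the engine does this). A hedged sketch of that contract, using only the class as shown above:

    import asyncio
    from crawlo.task_manager import TaskManager

    async def fetch(url: str) -> str:
        await asyncio.sleep(0.1)  # stand-in for a real download
        return url

    async def main() -> None:
        manager = TaskManager(total_concurrency=2)
        for url in ("https://a.example", "https://b.example", "https://c.example"):
            # Acquire before scheduling; the task's done-callback releases,
            # capping in-flight tasks at total_concurrency.
            await manager.semaphore.acquire()
            manager.create_task(fetch(url))
        while not manager.all_done():
            await asyncio.sleep(0.05)

    asyncio.run(main())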
crawlo/templates/crawlo.cfg.tmpl ADDED
@@ -0,0 +1,11 @@
+ # crawlo.cfg
+ # The project's configuration file.
+
+ [settings]
+ # Import path of the settings module
+ default = {{project_name}}.settings
+
+ [deploy]
+ # (Optional) deployment configuration
+ # url = http://localhost:6800/
+ # project = {{project_name}}
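The `[settings]` entry is what lets the CLI find the project's settings module from anywhere inside the project. This diff does not show crawlo's own loader (it lives in crawlo/utils/project.py), but a file in this format can be read with nothing beyond the standard library:

    from configparser import ConfigParser

    def read_settings_module(cfg_path: str = "crawlo.cfg") -> str:
        # Returns e.g. "myproject.settings" from the [settings] section.
        parser = ConfigParser()
        parser.read(cfg_path, encoding="utf-8")
        return parser.get("settings", "default")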
crawlo/templates/project/__init__.py.tmpl ADDED
@@ -0,0 +1,4 @@
+ # -*- coding: UTF-8 -*-
+ """
+ {{project_name}} project package
+ """
crawlo/templates/project/items.py.tmpl ADDED
@@ -0,0 +1,18 @@
+ # -*- coding: UTF-8 -*-
+ """
+ {{project_name}}.items
+ ======================
+ Define the data structures you scrape.
+ """
+
+ from crawlo.items import Item, Field
+
+
+ class ExampleItem(Item):
+     """
+     An example item.
+     """
+     # name = Field()
+     # price = Field()
+     # description = Field()
+     pass
crawlo/templates/project/middlewares.py.tmpl ADDED
@@ -0,0 +1,76 @@
+ # -*- coding: UTF-8 -*-
+ """
+ {{project_name}}.middlewares
+ ============================
+ Custom middlewares for inserting custom logic into request/response/exception handling.
+ """
+
+ # Example: downloader middleware
+ class CustomDownloaderMiddleware:
+     """
+     Example downloader middleware.
+     """
+
+     def process_request(self, request, spider):
+         """
+         Called before the request is executed by the downloader.
+         """
+         # request.headers['User-Agent'] = 'Custom UA'
+         # return None  # continue processing
+         # return request  # modify and return
+         # return Response(...)  # return a response and stop the download
+         pass
+
+     def process_response(self, request, response, spider):
+         """
+         Called before the response is processed by the spider.
+         """
+         # return response  # continue processing
+         # return request  # retry the request
+         pass
+
+     def process_exception(self, request, exception, spider):
+         """
+         Called when an exception occurs during download or processing.
+         """
+         # return None  # keep propagating the exception
+         # return request  # retry
+         # return Response(...)  # return a response
+         pass
+
+
+ # Example: spider middleware
+ class CustomSpiderMiddleware:
+     """
+     Example spider middleware.
+     """
+
+     def process_spider_input(self, response, spider):
+         """
+         Called before the spider's parse method is invoked.
+         """
+         # Can be used to validate the response
+         pass
+
+     def process_spider_output(self, response, result, spider):
+         """
+         Called after the spider's parse method returns its results.
+         """
+         # Can be used to filter or modify the results
+         # for item in result:
+         #     yield item
+         pass
+
+     def process_spider_exception(self, response, exception, spider):
+         """
+         Called when the spider's parse method raises an exception.
+         """
+         pass
+
+     def process_start_requests(self, start_requests, spider):
+         """
+         Called while the spider's start_requests generator is being consumed.
+         """
+         # for request in start_requests:
+         #     yield request
+         pass
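All the hooks above are stubs that `pass`, which leaves default behavior untouched. To activate a middleware, its import path goes into the `MIDDLEWARES` list in settings alongside the built-ins (see the settings template below); the project path here is illustrative:

    # settings.py of a generated project -- appending a custom middleware
    MIDDLEWARES = [
        'crawlo.middleware.request_ignore.RequestIgnoreMiddleware',
        'crawlo.middleware.retry.RetryMiddleware',
        'myproject.middlewares.CustomDownloaderMiddleware',  # illustrative path
    ]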
crawlo/templates/project/pipelines.py.tmpl ADDED
@@ -0,0 +1,64 @@
+ # -*- coding: UTF-8 -*-
+ """
+ {{project_name}}.pipelines
+ ==========================
+ Item pipelines that process the items returned by spiders.
+ For example: cleaning, validation, de-duplication, saving to a database, etc.
+ """
+
+ class PrintItemPipeline:
+     """
+     A simple pipeline that prints each item.
+     """
+
+     def process_item(self, item, spider):
+         print(f"Pipeline received item: {dict(item)}")
+         return item
+
+
+ class DuplicatesPipeline:
+     """
+     An example de-duplication pipeline.
+     """
+     def __init__(self):
+         self.seen = set()
+
+     def process_item(self, item, spider):
+         identifier = item.get('id') or item.get('url')
+         if identifier in self.seen:
+             spider.logger.debug(f"Duplicate item found: {identifier}")
+             raise DropItem(f"Duplicate item: {identifier}")
+         self.seen.add(identifier)
+         return item
+
+
+ # class MySQLPipeline:
+ #     """
+ #     Example pipeline that saves items to MySQL.
+ #     """
+ #     def __init__(self, mysql_uri, mysql_user, mysql_password, mysql_db):
+ #         self.mysql_uri = mysql_uri
+ #         self.mysql_user = mysql_user
+ #         self.mysql_password = mysql_password
+ #         self.mysql_db = mysql_db
+ #         self.connection = None
+ #
+ #     @classmethod
+ #     def from_settings(cls, settings):
+ #         return cls(
+ #             mysql_uri=settings.get('MYSQL_HOST'),
+ #             mysql_user=settings.get('MYSQL_USER'),
+ #             mysql_password=settings.get('MYSQL_PASSWORD'),
+ #             mysql_db=settings.get('MYSQL_DB')
+ #         )
+ #
+ #     def open_spider(self, spider):
+ #         self.connection = pymysql.connect(...)
+ #
+ #     def close_spider(self, spider):
+ #         if self.connection:
+ #             self.connection.close()
+ #
+ #     def process_item(self, item, spider):
+ #         # Execute the SQL insert
+ #         return item
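One caveat in the `DuplicatesPipeline` stub: it raises `DropItem` without importing it, so a generated project must add that import itself. Where `DropItem` is defined is not visible in this diff; an assumed import, to be verified against crawlo/exceptions.py, would be:

    # Assumption: DropItem lives in crawlo.exceptions (not confirmed by this diff).
    from crawlo.exceptions import DropItem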
crawlo/templates/project/settings.py.tmpl ADDED
@@ -0,0 +1,54 @@
+ # -*- coding: UTF-8 -*-
+ """Auto-generated settings.py file"""
+
+ PROJECT_NAME = '{{project_name}}'
+ VERSION = '1.0'
+
+ # ============================== Network requests ==============================
+ DOWNLOADER = "crawlo.downloader.aiohttp_downloader.AioHttpDownloader"
+ DOWNLOAD_TIMEOUT = 60
+ VERIFY_SSL = True
+ USE_SESSION = True
+
+ DOWNLOAD_DELAY = 1.0
+ RANDOMNESS = True
+
+ MAX_RETRY_TIMES = 3
+ RETRY_HTTP_CODES = [408, 429, 500, 502, 503, 504, 522, 524]
+ IGNORE_HTTP_CODES = [403, 404]
+
+ CONNECTION_POOL_LIMIT = 100
+
+ # ============================== Concurrency & scheduling ==============================
+ CONCURRENCY = 8
+ MAX_RUNNING_SPIDERS = 3
+
+ # ============================== Data storage ==============================
+ MYSQL_HOST = '127.0.0.1'
+ MYSQL_PORT = 3306
+ MYSQL_USER = 'root'
+ MYSQL_PASSWORD = '123456'
+ MYSQL_DB = '{{project_name}}'
+ MYSQL_TABLE = 'crawled_data'
+
+ # ============================== De-duplication filter ==============================
+ FILTER_CLASS = 'crawlo.filters.memory_filter.MemoryFilter'
+
+ # ============================== Middlewares & pipelines ==============================
+ MIDDLEWARES = [
+     'crawlo.middleware.request_ignore.RequestIgnoreMiddleware',
+     'crawlo.middleware.download_delay.DownloadDelayMiddleware',
+     'crawlo.middleware.default_header.DefaultHeaderMiddleware',
+     'crawlo.middleware.proxy.ProxyMiddleware',
+     'crawlo.middleware.retry.RetryMiddleware',
+     'crawlo.middleware.response_code.ResponseCodeMiddleware',
+     'crawlo.middleware.response_filter.ResponseFilterMiddleware',
+ ]
+
+ PIPELINES = [
+     'crawlo.pipelines.console_pipeline.ConsolePipeline',
+ ]
+
+ # ============================== Logging ==============================
+ LOG_LEVEL = 'INFO'
+ LOG_FILE = f'logs/{{{{project_name}}}}.log'
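The `{{project_name}}` tokens are substituted when `crawlo startproject` renders the template (the doubled braces in the `LOG_FILE` f-string are a template-escaping detail). For a project named `gxb`, as in the bundled examples, the rendered file would presumably start like:

    # Rendered excerpt for project_name = 'gxb' (illustrative)
    PROJECT_NAME = 'gxb'
    MYSQL_DB = 'gxb'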
crawlo/templates/project/spiders/__init__.py.tmpl ADDED
@@ -0,0 +1,6 @@
+ # -*- coding: UTF-8 -*-
+ """
+ {{project_name}}.spiders
+ ========================
+ All spiders live here.
+ """
crawlo/templates/spider/spider.py.tmpl ADDED
@@ -0,0 +1,32 @@
+ # -*- coding: UTF-8 -*-
+ """
+ {{project_name}}.spiders.{{spider_name}}
+ =======================================
+ Spider generated by the `crawlo genspider` command.
+ """
+
+ from crawlo.spider import Spider
+
+
+ class {{class_name}}(Spider):
+     """
+     Spider: {{spider_name}}
+     """
+     name = '{{spider_name}}'
+     allowed_domains = ['{{domain}}']
+     start_urls = ['https://{{domain}}/']
+
+     def parse(self, response):
+         """
+         Main method for parsing responses.
+         """
+         # TODO: write your parsing logic here
+
+         # Example: extract data
+         # item = {{item_class}}()
+         # item['title'] = response.xpath('//title/text()').get()
+         # yield item
+
+         # Example: extract links and follow them
+         # for href in response.xpath('//a/@href').getall():
+         #     yield response.follow(href, callback=self.parse)
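For reference, `crawlo genspider` fills the `{{...}}` placeholders; with spider name `example` and domain `example.com`, the generated module would presumably look like this (class-name derivation assumed):

    from crawlo.spider import Spider


    class ExampleSpider(Spider):  # class name derived from the spider name (assumed)
        name = 'example'
        allowed_domains = ['example.com']
        start_urls = ['https://example.com/']

        def parse(self, response):
            # TODO: write your parsing logic here
            pass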
crawlo/utils/__init__.py CHANGED
@@ -1,7 +1,7 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- """
- # @Time   : 2025-02-05 13:57
- # @Author : oscar
- # @Desc   : None
- """
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ """
+ # @Time   : 2025-02-05 13:57
+ # @Author : oscar
+ # @Desc   : None
+ """