crawlo 1.0.9__py3-none-any.whl → 1.1.1__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.

This version of crawlo has been flagged as potentially problematic.

Files changed (111)
  1. crawlo/__init__.py +33 -24
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +40 -40
  4. crawlo/commands/__init__.py +13 -13
  5. crawlo/commands/check.py +594 -106
  6. crawlo/commands/genspider.py +125 -110
  7. crawlo/commands/list.py +147 -92
  8. crawlo/commands/run.py +286 -181
  9. crawlo/commands/startproject.py +111 -101
  10. crawlo/commands/stats.py +188 -59
  11. crawlo/core/__init__.py +2 -2
  12. crawlo/core/engine.py +158 -158
  13. crawlo/core/processor.py +40 -40
  14. crawlo/core/scheduler.py +57 -57
  15. crawlo/crawler.py +494 -492
  16. crawlo/downloader/__init__.py +78 -78
  17. crawlo/downloader/aiohttp_downloader.py +199 -199
  18. crawlo/downloader/cffi_downloader.py +242 -277
  19. crawlo/downloader/httpx_downloader.py +246 -246
  20. crawlo/event.py +11 -11
  21. crawlo/exceptions.py +78 -78
  22. crawlo/extension/__init__.py +31 -31
  23. crawlo/extension/log_interval.py +49 -49
  24. crawlo/extension/log_stats.py +44 -44
  25. crawlo/extension/logging_extension.py +34 -34
  26. crawlo/filters/__init__.py +37 -37
  27. crawlo/filters/aioredis_filter.py +150 -150
  28. crawlo/filters/memory_filter.py +202 -202
  29. crawlo/items/__init__.py +23 -23
  30. crawlo/items/base.py +21 -21
  31. crawlo/items/fields.py +53 -53
  32. crawlo/items/items.py +104 -104
  33. crawlo/middleware/__init__.py +21 -21
  34. crawlo/middleware/default_header.py +32 -32
  35. crawlo/middleware/download_delay.py +28 -28
  36. crawlo/middleware/middleware_manager.py +135 -135
  37. crawlo/middleware/proxy.py +245 -245
  38. crawlo/middleware/request_ignore.py +30 -30
  39. crawlo/middleware/response_code.py +18 -18
  40. crawlo/middleware/response_filter.py +26 -26
  41. crawlo/middleware/retry.py +90 -90
  42. crawlo/network/__init__.py +7 -7
  43. crawlo/network/request.py +203 -203
  44. crawlo/network/response.py +166 -166
  45. crawlo/pipelines/__init__.py +13 -13
  46. crawlo/pipelines/console_pipeline.py +39 -39
  47. crawlo/pipelines/mongo_pipeline.py +116 -116
  48. crawlo/pipelines/mysql_batch_pipline.py +272 -272
  49. crawlo/pipelines/mysql_pipeline.py +195 -195
  50. crawlo/pipelines/pipeline_manager.py +56 -56
  51. crawlo/project.py +153 -0
  52. crawlo/settings/__init__.py +7 -7
  53. crawlo/settings/default_settings.py +166 -168
  54. crawlo/settings/setting_manager.py +99 -99
  55. crawlo/spider/__init__.py +129 -129
  56. crawlo/stats_collector.py +59 -59
  57. crawlo/subscriber.py +106 -106
  58. crawlo/task_manager.py +27 -27
  59. crawlo/templates/crawlo.cfg.tmpl +10 -10
  60. crawlo/templates/project/__init__.py.tmpl +3 -3
  61. crawlo/templates/project/items.py.tmpl +17 -17
  62. crawlo/templates/project/middlewares.py.tmpl +75 -75
  63. crawlo/templates/project/pipelines.py.tmpl +63 -63
  64. crawlo/templates/project/settings.py.tmpl +54 -54
  65. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  66. crawlo/templates/spider/spider.py.tmpl +31 -31
  67. crawlo/utils/__init__.py +7 -7
  68. crawlo/utils/date_tools.py +233 -233
  69. crawlo/utils/db_helper.py +343 -343
  70. crawlo/utils/func_tools.py +82 -82
  71. crawlo/utils/log.py +128 -128
  72. crawlo/utils/pqueue.py +173 -173
  73. crawlo/utils/request.py +267 -267
  74. crawlo/utils/spider_loader.py +62 -62
  75. crawlo/utils/system.py +11 -11
  76. crawlo/utils/tools.py +4 -4
  77. crawlo/utils/url.py +39 -39
  78. crawlo-1.1.1.dist-info/METADATA +220 -0
  79. crawlo-1.1.1.dist-info/RECORD +100 -0
  80. examples/__init__.py +7 -0
  81. examples/baidu_spider/__init__.py +7 -0
  82. examples/baidu_spider/demo.py +94 -0
  83. examples/baidu_spider/items.py +46 -0
  84. examples/baidu_spider/middleware.py +49 -0
  85. examples/baidu_spider/pipeline.py +55 -0
  86. examples/baidu_spider/run.py +27 -0
  87. examples/baidu_spider/settings.py +121 -0
  88. examples/baidu_spider/spiders/__init__.py +7 -0
  89. examples/baidu_spider/spiders/bai_du.py +61 -0
  90. examples/baidu_spider/spiders/miit.py +159 -0
  91. examples/baidu_spider/spiders/sina.py +79 -0
  92. tests/__init__.py +7 -7
  93. tests/test_proxy_health_check.py +32 -32
  94. tests/test_proxy_middleware_integration.py +136 -136
  95. tests/test_proxy_providers.py +56 -56
  96. tests/test_proxy_stats.py +19 -19
  97. tests/test_proxy_strategies.py +59 -59
  98. crawlo/utils/concurrency_manager.py +0 -125
  99. crawlo/utils/project.py +0 -197
  100. crawlo-1.0.9.dist-info/METADATA +0 -49
  101. crawlo-1.0.9.dist-info/RECORD +0 -97
  102. examples/gxb/__init__.py +0 -0
  103. examples/gxb/items.py +0 -36
  104. examples/gxb/run.py +0 -16
  105. examples/gxb/settings.py +0 -72
  106. examples/gxb/spider/__init__.py +0 -0
  107. examples/gxb/spider/miit_spider.py +0 -180
  108. examples/gxb/spider/telecom_device.py +0 -129
  109. {crawlo-1.0.9.dist-info → crawlo-1.1.1.dist-info}/WHEEL +0 -0
  110. {crawlo-1.0.9.dist-info → crawlo-1.1.1.dist-info}/entry_points.txt +0 -0
  111. {crawlo-1.0.9.dist-info → crawlo-1.1.1.dist-info}/top_level.txt +0 -0
crawlo/utils/request.py CHANGED
@@ -1,267 +1,267 @@
All 267 lines are removed and re-added with identical content (most likely a whitespace or line-ending change), so the file is shown once:

#!/usr/bin/python
# -*- coding:UTF-8 -*-
"""
# @Time   : 2025-07-08 08:55
# @Author : crawl-coder
# @Desc   : None
"""
import importlib
import json
import hashlib
from typing import Any, Optional, Iterable, Union, Dict
from w3lib.url import canonicalize_url

from crawlo import Request


def to_bytes(data: Any, encoding: str = 'utf-8') -> bytes:
    """
    Convert values of various types to bytes.

    Args:
        data: The value to convert; str, bytes, dict, int, float, bool, None, etc. are supported.
        encoding: String encoding, defaults to 'utf-8'.

    Returns:
        bytes: The converted byte string.

    Raises:
        TypeError: If the value cannot be converted.
        UnicodeEncodeError: If encoding fails.
        ValueError: If JSON serialization fails.

    Examples:
        >>> to_bytes("hello")
        b'hello'
        >>> to_bytes({"key": "value"})
        b'{"key": "value"}'
        >>> to_bytes(123)
        b'123'
        >>> to_bytes(None)
        b'null'
    """
    # Validate the encoding argument up front
    if not isinstance(encoding, str):
        raise TypeError(f"encoding must be str, not {type(encoding).__name__}")

    try:
        if isinstance(data, bytes):
            return data
        elif isinstance(data, str):
            return data.encode(encoding)
        elif isinstance(data, dict):
            return json.dumps(data, sort_keys=True, ensure_ascii=False, separators=(',', ':')).encode(encoding)
        elif isinstance(data, (int, float, bool)):
            return str(data).encode(encoding)
        elif data is None:
            return b'null'
        elif hasattr(data, '__str__'):
            # Fall back to str() for any other stringifiable object
            return str(data).encode(encoding)
        else:
            raise TypeError(
                f"`data` must be str, dict, bytes, int, float, bool, or None, "
                f"not {type(data).__name__}"
            )
    except (UnicodeEncodeError, ValueError) as e:
        raise type(e)(f"Failed to convert {type(data).__name__} to bytes: {str(e)}") from e


def request_fingerprint(
        request: Request,
        include_headers: Optional[Iterable[Union[bytes, str]]] = None
) -> str:
    """
    Generate a request fingerprint from the method, the canonicalized URL,
    the body, and (optionally) selected headers. Uses SHA256 for stronger
    collision resistance.

    :param request: Request object (must expose method, url, body, headers)
    :param include_headers: header names (str or bytes) to include in the fingerprint
    :return: the fingerprint as a hex string
    """
    hash_func = hashlib.sha256()

    # Core fields
    hash_func.update(to_bytes(request.method))
    hash_func.update(to_bytes(canonicalize_url(request.url)))
    hash_func.update(request.body or b'')

    # Optional headers
    if include_headers:
        headers = request.headers  # assumed to be dict-like or a MultiDict
        for header_name in include_headers:
            name_bytes = to_bytes(header_name).lower()  # normalize to lowercase for matching
            value = b''

            # Support both MultiDict-style and plain-dict header access
            if hasattr(headers, 'get_all'):
                # e.g. scrapy.http.Headers exposes get_all()
                values = headers.get_all(name_bytes)
                value = b';'.join(values) if values else b''
            elif hasattr(headers, '__getitem__'):
                # plain dict
                try:
                    raw_value = headers[name_bytes]
                    if isinstance(raw_value, list):
                        value = b';'.join(to_bytes(v) for v in raw_value)
                    else:
                        value = to_bytes(raw_value)
                except (KeyError, TypeError):
                    value = b''
            else:
                value = b''

            hash_func.update(name_bytes + b':' + value)

    return hash_func.hexdigest()


def set_request(request: Request, priority: int) -> None:
    request.meta['depth'] = request.meta.setdefault('depth', 0) + 1
    if priority:
        request.priority -= request.meta['depth'] * priority


def request_to_dict(request: Request, spider=None) -> Dict[str, Any]:
    """
    Convert a Request object into a JSON-serializable dict.

    Args:
        request: The Request to serialize.
        spider: Optional; used to resolve ownership of callbacks.

    Returns:
        A dict holding all key attributes of the Request.
    """
    # Basic attributes
    d = {
        'url': request.url,
        'method': request.method,
        'headers': dict(request.headers),
        'body': request.body,
        'meta': request.meta.copy(),  # take a copy to avoid shared state
        'flags': request.flags.copy(),
        'cb_kwargs': request.cb_kwargs.copy(),
    }

    # 1. Callback: functions cannot be serialized directly, so store the import path
    if callable(request.callback):
        d['_callback'] = _get_function_path(request.callback)

    # 2. Errback
    if callable(request.errback):
        d['_errback'] = _get_function_path(request.errback)

    # 3. Record the original class name so deserialization rebuilds the right type
    d['_class'] = request.__class__.__module__ + '.' + request.__class__.__name__

    # 4. Special handling for form requests: persist formdata when present
    if isinstance(request, Request):
        if hasattr(request, 'formdata'):
            d['formdata'] = request.formdata

    return d


def request_from_dict(d: Dict[str, Any], spider=None) -> Request:
    """
    Rebuild a Request object from a dict.

    Args:
        d: A dict produced by request_to_dict.
        spider: Optional; used to resolve callback functions.

    Returns:
        The reconstructed Request object.
    """
    # 1. Resolve the class name and import it dynamically
    cls_path = d.pop('_class', None)
    if cls_path:
        module_path, cls_name = cls_path.rsplit('.', 1)
        module = importlib.import_module(module_path)
        cls = getattr(module, cls_name)
    else:
        cls = Request  # default to Request

    # 2. Resolve the callback
    callback_path = d.pop('_callback', None)
    callback = _get_function_from_path(callback_path, spider) if callback_path else None

    # 3. Resolve the errback
    errback_path = d.pop('_errback', None)
    errback = _get_function_from_path(errback_path, spider) if errback_path else None

    # 4. Extract special fields
    formdata = d.pop('formdata', None)

    # 5. Build the Request instance
    # Note: body and formdata must not both be passed.
    # NOTE: FormRequest is never imported in this module, so the comparison
    # below raises NameError whenever formdata is present, as shipped.
    if formdata is not None and cls is FormRequest:
        # A FormRequest with formdata: prefer formdata over body
        request = FormRequest(
            url=d['url'],
            method=d.get('method', 'GET'),
            headers=d.get('headers', {}),
            formdata=formdata,
            callback=callback,
            errback=errback,
            meta=d.get('meta', {}),
            flags=d.get('flags', []),
            cb_kwargs=d.get('cb_kwargs', {}),
        )
    else:
        # Plain Request, or no formdata
        request = cls(
            url=d['url'],
            method=d.get('method', 'GET'),
            headers=d.get('headers', {}),
            body=d.get('body'),
            callback=callback,
            errback=errback,
            meta=d.get('meta', {}),
            flags=d.get('flags', []),
            cb_kwargs=d.get('cb_kwargs', {}),
        )

    return request


def _get_function_path(func: callable) -> str:
    """
    Return a function's import path, e.g. 'myproject.spiders.my_spider.parse'.
    """
    if hasattr(func, '__wrapped__'):
        # Unwrap decorated functions
        func = func.__wrapped__
    module = func.__module__
    if module is None or module == str.__class__.__module__:
        raise ValueError(f"Cannot serialize a builtin function or lambda: {func}")
    return f"{module}.{func.__qualname__}"


def _get_function_from_path(path: str, spider=None) -> Optional[callable]:
    """
    Resolve a function object from a path string.
    If the function is a spider method, try to bind it to the spider instance.
    """
    try:
        module_path, func_name = path.rsplit('.', 1)
        module = importlib.import_module(module_path)

        # Walk attributes level by level to support nested functions
        func = module
        for attr in func_name.split('.'):
            func = getattr(func, attr)

        # If a spider is given and func is one of its methods, return the bound method
        if spider and hasattr(spider, func.__name__):
            spider_method = getattr(spider, func.__name__)
            if spider_method is func:
                return spider_method

        return func
    except Exception as e:
        raise ValueError(f"Failed to load function from path '{path}': {e}")
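For reference, here is a minimal sketch of the fingerprint's behaviour. It assumes crawlo 1.1.1 (and its w3lib dependency) is installed; the SimpleNamespace objects are illustrative stand-ins for Request, since request_fingerprint only reads .method, .url, .body, and .headers.

from types import SimpleNamespace

from crawlo.utils.request import request_fingerprint

# Two requests whose URLs differ only in query-parameter order:
# canonicalize_url normalizes them, so the fingerprints match.
req_a = SimpleNamespace(method='GET', url='http://example.com/path?b=2&a=1',
                        body=b'', headers={'User-Agent': 'crawlo'})
req_b = SimpleNamespace(method='GET', url='http://example.com/path?a=1&b=2',
                        body=b'', headers={'User-Agent': 'crawlo'})

assert request_fingerprint(req_a) == request_fingerprint(req_b)

# Headers contribute only when explicitly listed, so naming one
# changes the digest even though the request is unchanged.
fp_plain = request_fingerprint(req_a)
fp_header = request_fingerprint(req_a, include_headers=['User-Agent'])
assert fp_plain != fp_header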
crawlo/utils/spider_loader.py CHANGED
@@ -1,63 +1,63 @@
Lines 1-62 are removed and re-added with identical content (most likely a whitespace or line-ending change), with the final return kept as context; the file is shown once:

import importlib
import inspect
from pathlib import Path
from typing import List, Type, Optional, Dict

from crawlo.spider import Spider
from crawlo.utils.log import get_logger

logger = get_logger(__name__)


class SpiderLoader:
    """Spider loader: discovers and loads spiders."""

    def __init__(self, project_package: str):
        self.project_package = project_package
        self._spiders: Dict[str, Type[Spider]] = {}
        self._load_spiders()

    def _load_spiders(self):
        """Load all spiders."""
        spiders_dir = Path.cwd() / self.project_package / 'spiders'
        if not spiders_dir.exists():
            logger.warning(f"Spiders directory not found: {spiders_dir}")
            return

        for py_file in spiders_dir.glob("*.py"):
            if py_file.name.startswith('_'):
                continue

            module_name = py_file.stem
            spider_module_path = f"{self.project_package}.spiders.{module_name}"

            try:
                module = importlib.import_module(spider_module_path)
            except ImportError as e:
                logger.debug(f"Skip module {module_name}: {e}")
                continue

            # Collect every Spider subclass defined in the module
            for attr_name in dir(module):
                attr_value = getattr(module, attr_name)
                if (isinstance(attr_value, type) and
                        issubclass(attr_value, Spider) and
                        attr_value != Spider and
                        hasattr(attr_value, 'name')):

                    spider_name = getattr(attr_value, 'name')
                    if spider_name in self._spiders:
                        logger.warning(f"Duplicate spider name '{spider_name}' found")
                    self._spiders[spider_name] = attr_value

    def load(self, spider_name: str) -> Optional[Type[Spider]]:
        """Load a spider by name."""
        return self._spiders.get(spider_name)

    def list(self) -> List[str]:
        """List the names of all available spiders."""
        return list(self._spiders.keys())

    def get_all(self) -> Dict[str, Type[Spider]]:
        """Return all spiders."""
        return self._spiders.copy()
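Similarly, a minimal usage sketch for SpiderLoader, assuming it runs from a project root that contains a myproject package with a spiders/ subpackage on the import path (the package and spider names here are illustrative):

from crawlo.utils.spider_loader import SpiderLoader

# Discovery happens in __init__: every non-underscore module in
# myproject/spiders/ is imported and scanned for Spider subclasses.
loader = SpiderLoader('myproject')

print(loader.list())                # e.g. ['bai_du', 'sina']

spider_cls = loader.load('bai_du')  # None if no spider has that name
if spider_cls is not None:
    print(spider_cls.name)          # the class attribute used as the lookup key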