crawlo-1.1.1-py3-none-any.whl → crawlo-1.1.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo has been flagged as possibly problematic.

Files changed (68)
  1. crawlo/__init__.py +2 -1
  2. crawlo/__version__.py +1 -1
  3. crawlo/commands/genspider.py +68 -42
  4. crawlo/commands/list.py +102 -93
  5. crawlo/commands/startproject.py +89 -4
  6. crawlo/commands/utils.py +187 -0
  7. crawlo/config.py +280 -0
  8. crawlo/core/engine.py +16 -3
  9. crawlo/core/enhanced_engine.py +190 -0
  10. crawlo/core/scheduler.py +113 -8
  11. crawlo/crawler.py +840 -307
  12. crawlo/downloader/__init__.py +181 -17
  13. crawlo/downloader/aiohttp_downloader.py +15 -2
  14. crawlo/downloader/cffi_downloader.py +11 -1
  15. crawlo/downloader/httpx_downloader.py +14 -3
  16. crawlo/filters/__init__.py +122 -5
  17. crawlo/filters/aioredis_filter.py +128 -36
  18. crawlo/filters/memory_filter.py +99 -32
  19. crawlo/middleware/proxy.py +11 -8
  20. crawlo/middleware/retry.py +40 -5
  21. crawlo/mode_manager.py +201 -0
  22. crawlo/network/__init__.py +17 -3
  23. crawlo/network/request.py +118 -10
  24. crawlo/network/response.py +131 -28
  25. crawlo/pipelines/__init__.py +1 -1
  26. crawlo/pipelines/csv_pipeline.py +317 -0
  27. crawlo/pipelines/json_pipeline.py +219 -0
  28. crawlo/queue/__init__.py +0 -0
  29. crawlo/queue/pqueue.py +37 -0
  30. crawlo/queue/queue_manager.py +304 -0
  31. crawlo/queue/redis_priority_queue.py +192 -0
  32. crawlo/settings/default_settings.py +68 -9
  33. crawlo/spider/__init__.py +576 -66
  34. crawlo/task_manager.py +4 -1
  35. crawlo/templates/project/middlewares.py.tmpl +56 -45
  36. crawlo/templates/project/pipelines.py.tmpl +308 -36
  37. crawlo/templates/project/run.py.tmpl +239 -0
  38. crawlo/templates/project/settings.py.tmpl +211 -17
  39. crawlo/templates/spider/spider.py.tmpl +153 -7
  40. crawlo/utils/controlled_spider_mixin.py +336 -0
  41. crawlo/utils/large_scale_config.py +287 -0
  42. crawlo/utils/large_scale_helper.py +344 -0
  43. crawlo/utils/queue_helper.py +176 -0
  44. crawlo/utils/request_serializer.py +220 -0
  45. crawlo-1.1.2.dist-info/METADATA +567 -0
  46. {crawlo-1.1.1.dist-info → crawlo-1.1.2.dist-info}/RECORD +54 -46
  47. tests/test_final_validation.py +154 -0
  48. tests/test_redis_config.py +29 -0
  49. tests/test_redis_queue.py +225 -0
  50. tests/test_request_serialization.py +71 -0
  51. tests/test_scheduler.py +242 -0
  52. crawlo/pipelines/mysql_batch_pipline.py +0 -273
  53. crawlo/utils/pqueue.py +0 -174
  54. crawlo-1.1.1.dist-info/METADATA +0 -220
  55. examples/baidu_spider/__init__.py +0 -7
  56. examples/baidu_spider/demo.py +0 -94
  57. examples/baidu_spider/items.py +0 -46
  58. examples/baidu_spider/middleware.py +0 -49
  59. examples/baidu_spider/pipeline.py +0 -55
  60. examples/baidu_spider/run.py +0 -27
  61. examples/baidu_spider/settings.py +0 -121
  62. examples/baidu_spider/spiders/__init__.py +0 -7
  63. examples/baidu_spider/spiders/bai_du.py +0 -61
  64. examples/baidu_spider/spiders/miit.py +0 -159
  65. examples/baidu_spider/spiders/sina.py +0 -79
  66. {crawlo-1.1.1.dist-info → crawlo-1.1.2.dist-info}/WHEEL +0 -0
  67. {crawlo-1.1.1.dist-info → crawlo-1.1.2.dist-info}/entry_points.txt +0 -0
  68. {crawlo-1.1.1.dist-info → crawlo-1.1.2.dist-info}/top_level.txt +0 -0
crawlo/downloader/__init__.py
@@ -1,35 +1,84 @@
  #!/usr/bin/python
  # -*- coding:UTF-8 -*-
+ """
+ Crawlo Downloader Module
+ ========================
+ Provides several high-performance asynchronous downloader implementations.
+
+ Downloader types:
+ - AioHttpDownloader: high-performance downloader built on aiohttp
+ - CurlCffiDownloader: curl-cffi downloader with browser-fingerprint emulation
+ - HttpXDownloader: httpx downloader with HTTP/2 support
+
+ Core classes:
+ - DownloaderBase: downloader base class
+ - ActivateRequestManager: active-request manager
+ """
  from abc import abstractmethod, ABCMeta
- from typing_extensions import Self
  from typing import Final, Set, Optional
  from contextlib import asynccontextmanager

- from crawlo import Response, Request
  from crawlo.utils.log import get_logger
  from crawlo.middleware.middleware_manager import MiddlewareManager


  class ActivateRequestManager:
+     """Active-request manager: tracks and manages requests that are in flight"""

      def __init__(self):
          self._active: Final[Set] = set()
+         self._total_requests: int = 0
+         self._completed_requests: int = 0
+         self._failed_requests: int = 0

      def add(self, request):
+         """Register an active request"""
          self._active.add(request)
+         self._total_requests += 1
+         return request

-     def remove(self, request):
-         self._active.remove(request)
+     def remove(self, request, success: bool = True):
+         """Remove an active request and update the statistics"""
+         self._active.discard(request)  # discard avoids a KeyError
+         if success:
+             self._completed_requests += 1
+         else:
+             self._failed_requests += 1

      @asynccontextmanager
      async def __call__(self, request):
+         """Context-manager usage"""
+         self.add(request)
+         success = False
          try:
-             yield self.add(request)
+             yield request
+             success = True
+         except Exception:
+             success = False
+             raise
          finally:
-             self.remove(request)
+             self.remove(request, success)

      def __len__(self):
+         """Return the current number of active requests"""
          return len(self._active)
+
+     def get_stats(self) -> dict:
+         """Return request statistics"""
+         return {
+             'active_requests': len(self._active),
+             'total_requests': self._total_requests,
+             'completed_requests': self._completed_requests,
+             'failed_requests': self._failed_requests,
+             'success_rate': self._completed_requests / max(1, self._total_requests - len(self._active))
+         }
+
+     def reset_stats(self):
+         """Reset the statistics"""
+         self._total_requests = 0
+         self._completed_requests = 0
+         self._failed_requests = 0
+         # Note: _active is not cleared, since requests may still be in flight


  class DownloaderMeta(ABCMeta):
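
The rewritten context-manager protocol above now records success or failure for every request it tracks. A minimal sketch of that lifecycle, using only what the hunk defines (the plain-string stand-in for a request object is illustrative):

import asyncio
from crawlo.downloader import ActivateRequestManager

async def demo():
    manager = ActivateRequestManager()
    # Any hashable object can stand in for a Request here.
    async with manager("https://example.com"):
        pass  # on a clean exit, remove(..., success=True) runs in `finally`
    print(manager.get_stats())
    # {'active_requests': 0, 'total_requests': 1, 'completed_requests': 1,
    #  'failed_requests': 0, 'success_rate': 1.0}

asyncio.run(demo())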
@@ -42,37 +91,152 @@ class DownloaderMeta(ABCMeta):


  class DownloaderBase(metaclass=DownloaderMeta):
+     """
+     Downloader base class: provides the common downloader functionality and interface.
+
+     All downloader implementations should inherit from this class.
+     """
+
      def __init__(self, crawler):
          self.crawler = crawler
          self._active = ActivateRequestManager()
          self.middleware: Optional[MiddlewareManager] = None
          self.logger = get_logger(self.__class__.__name__, crawler.settings.get("LOG_LEVEL"))
+         self._closed = False
+         self._stats_enabled = crawler.settings.get_bool("DOWNLOADER_STATS", True)

      @classmethod
-     def create_instance(cls, *args, **kwargs) -> Self:
+     def create_instance(cls, *args, **kwargs):
+         """Create a downloader instance"""
          return cls(*args, **kwargs)

      def open(self) -> None:
+         """Initialize the downloader"""
+         if self._closed:
+             raise RuntimeError(f"{self.__class__.__name__} is closed and cannot be reopened")
+
          self.logger.info(
-             f"{self.crawler.spider} <downloader class:{type(self).__name__}>"
-             f"<concurrency:{self.crawler.settings.get_int('CONCURRENCY')}>"
+             f"{self.crawler.spider} <downloader class:{type(self).__name__}> "
+             f"<concurrency:{self.crawler.settings.get_int('CONCURRENCY')}>"
          )
-         self.middleware = MiddlewareManager.create_instance(self.crawler)
-
-     async def fetch(self, request) -> Optional[Response]:
+
+         try:
+             self.middleware = MiddlewareManager.create_instance(self.crawler)
+             self.logger.debug(f"{self.__class__.__name__} middleware initialized")
+         except Exception as e:
+             self.logger.error(f"Middleware initialization failed: {e}")
+             raise
+
+     async def fetch(self, request) -> Optional['Response']:
+         """Fetch the response for a request (processed through the middleware chain)"""
+         if self._closed:
+             raise RuntimeError(f"{self.__class__.__name__} is closed")
+
+         if not self.middleware:
+             raise RuntimeError("Middleware not initialized")
+
          async with self._active(request):
-             response = await self.middleware.download(request)
-             return response
+             try:
+                 response = await self.middleware.download(request)
+                 return response
+             except Exception as e:
+                 self.logger.error(f"Failed to download request {request.url}: {e}")
+                 raise

      @abstractmethod
-     async def download(self, request: Request) -> Response:
+     async def download(self, request) -> 'Response':
+         """Download method that subclasses must implement"""
          pass

      async def close(self) -> None:
-         pass
+         """Close the downloader and release its resources"""
+         if not self._closed:
+             self._closed = True
+             if self._stats_enabled:
+                 stats = self.get_stats()
+                 self.logger.info(f"{self.__class__.__name__} stats: {stats}")
+             self.logger.debug(f"{self.__class__.__name__} closed")

      def idle(self) -> bool:
-         return len(self) == 0
+         """Check whether the downloader is idle (no active requests)"""
+         return len(self._active) == 0

      def __len__(self) -> int:
+         """Return the number of active requests"""
          return len(self._active)
+
+     def get_stats(self) -> dict:
+         """Return downloader statistics"""
+         base_stats = {
+             'downloader_class': self.__class__.__name__,
+             'is_idle': self.idle(),
+             'is_closed': self._closed
+         }
+
+         if self._stats_enabled:
+             base_stats.update(self._active.get_stats())
+
+         return base_stats
+
+     def reset_stats(self):
+         """Reset the statistics"""
+         if self._stats_enabled:
+             self._active.reset_stats()
+
+     def health_check(self) -> dict:
+         """Health check"""
+         return {
+             'status': 'healthy' if not self._closed and self.middleware else 'unhealthy',
+             'active_requests': len(self._active),
+             'middleware_ready': self.middleware is not None,
+             'closed': self._closed
+         }
+
+
+ # Import the concrete downloader implementations
+ try:
+     from .aiohttp_downloader import AioHttpDownloader
+ except ImportError:
+     AioHttpDownloader = None
+
+ try:
+     from .cffi_downloader import CurlCffiDownloader
+ except ImportError:
+     CurlCffiDownloader = None
+
+ try:
+     from .httpx_downloader import HttpXDownloader
+ except ImportError:
+     HttpXDownloader = None
+
+ # Export all available classes
+ __all__ = [
+     'DownloaderBase',
+     'DownloaderMeta',
+     'ActivateRequestManager',
+ ]
+
+ # Add whichever downloaders are available
+ if AioHttpDownloader:
+     __all__.append('AioHttpDownloader')
+ if CurlCffiDownloader:
+     __all__.append('CurlCffiDownloader')
+ if HttpXDownloader:
+     __all__.append('HttpXDownloader')
+
+ # Convenience mapping from names to downloader classes
+ DOWNLOADER_MAP = {
+     'aiohttp': AioHttpDownloader,
+     'httpx': HttpXDownloader,
+     'curl_cffi': CurlCffiDownloader,
+     'cffi': CurlCffiDownloader,  # alias
+ }
+
+ # Drop downloaders that failed to import
+ DOWNLOADER_MAP = {k: v for k, v in DOWNLOADER_MAP.items() if v is not None}
+
+ def get_downloader_class(name: str):
+     """Look up a downloader class by name"""
+     if name in DOWNLOADER_MAP:
+         return DOWNLOADER_MAP[name]
+     raise ValueError(f"Unknown downloader type: {name}. Available types: {list(DOWNLOADER_MAP.keys())}")
crawlo/downloader/aiohttp_downloader.py
@@ -12,7 +12,7 @@ from aiohttp import (
      BasicAuth,
  )

- from crawlo import Response
+ from crawlo.network.response import Response
  from crawlo.utils.log import get_logger
  from crawlo.downloader import DownloaderBase

@@ -74,9 +74,15 @@ class AioHttpDownloader(DownloaderBase):
          self.logger.debug("AioHttpDownloader initialized.")

      async def download(self, request) -> Optional[Response]:
+         """Download the request and return the response"""
          if not self.session or self.session.closed:
              raise RuntimeError("AioHttpDownloader session is not open.")

+         start_time = None
+         if self.crawler.settings.get_bool("DOWNLOAD_STATS", True):
+             import time
+             start_time = time.time()
+
          try:
              # Shared send logic (supports all HTTP methods)
              async with await self._send_request(self.session, request) as resp:
@@ -86,7 +92,14 @@ class AioHttpDownloader(DownloaderBase):
                      raise OverflowError(f"Response too large: {content_length} > {self.max_download_size}")

                  body = await resp.read()
-                 return self._structure_response(request, resp, body)
+                 response = self._structure_response(request, resp, body)
+
+                 # Record download statistics
+                 if start_time:
+                     download_time = time.time() - start_time
+                     self.logger.debug(f"Downloaded {request.url} in {download_time:.3f}s, size: {len(body)} bytes")
+
+                 return response

          except ClientError as e:
              self.logger.error(f"Client error for {request.url}: {e}")
crawlo/downloader/cffi_downloader.py
@@ -7,7 +7,7 @@ from typing import Optional, Dict, Any
  from curl_cffi import CurlError
  from curl_cffi.requests import AsyncSession

- from crawlo import Response
+ from crawlo.network.response import Response
  from crawlo.downloader import DownloaderBase


@@ -141,6 +141,11 @@ class CurlCffiDownloader(DownloaderBase):
          if not self.session:
              raise RuntimeError("Session not initialized")

+         start_time = None
+         if self.crawler.settings.get_bool("DOWNLOAD_STATS", True):
+             import time
+             start_time = time.time()
+
          kwargs = self._build_request_kwargs(request)
          method = request.method.lower()

@@ -173,6 +178,11 @@ class CurlCffiDownloader(DownloaderBase):
          if actual_size > self.download_warn_size:
              self.logger.warning(f"Large response body: {actual_size} bytes from {request.url}")

+         # Record download statistics
+         if start_time:
+             download_time = time.time() - start_time
+             self.logger.debug(f"Downloaded {request.url} in {download_time:.3f}s, size: {actual_size} bytes")
+
          return self._structure_response(request, response, body)

      def _build_request_kwargs(self, request) -> Dict[str, Any]:
crawlo/downloader/httpx_downloader.py
@@ -4,7 +4,7 @@ import httpx
  from typing import Optional
  from httpx import AsyncClient, Timeout, Limits

- from crawlo import Response
+ from crawlo.network.response import Response
  from crawlo.downloader import DownloaderBase
  from crawlo.utils.log import get_logger

@@ -90,9 +90,15 @@ class HttpXDownloader(DownloaderBase):
          self.logger.debug("HttpXDownloader initialized.")

      async def download(self, request) -> Optional[Response]:
+         """Download the request and return the response, with graceful fallback when a proxy fails"""
          if not self._client:
              raise RuntimeError("HttpXDownloader client is not available.")

+         start_time = None
+         if self.crawler.settings.get_bool("DOWNLOAD_STATS", True):
+             import time
+             start_time = time.time()
+
          # --- 1. Decide which client instance to use ---
          effective_client = self._client  # default to the shared main client
          temp_client = None  # temporary client that may be created below
@@ -194,7 +200,12 @@ class HttpXDownloader(DownloaderBase):
              # --- 7. Read the response body ---
              body = await httpx_response.aread()

-             # --- 8. Build and return the Response ---
+             # --- 8. Record download statistics ---
+             if start_time:
+                 download_time = time.time() - start_time
+                 self.logger.debug(f"Downloaded {request.url} in {download_time:.3f}s, size: {len(body)} bytes")
+
+             # --- 9. Build and return the Response ---
              return self.structure_response(request=request, response=httpx_response, body=body)

          except httpx.TimeoutException as e:
@@ -219,7 +230,7 @@ class HttpXDownloader(DownloaderBase):
              raise

          finally:
-             # --- 9. Cleanup: close the temporary client ---
+             # --- 10. Cleanup: close the temporary client ---
              # Close the temporary client if one was created
              if temp_client:
                  try:
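
All three downloaders now repeat the same DOWNLOAD_STATS timing bookkeeping inline around their network calls. Factored out, the pattern looks roughly like this (the fetch_with_timing wrapper is an illustrative refactoring, not code shipped in the release):

import time

async def fetch_with_timing(downloader, request):
    # Same guard the downloaders use: timing is skipped entirely
    # unless the DOWNLOAD_STATS setting is enabled (default True).
    start_time = None
    if downloader.crawler.settings.get_bool("DOWNLOAD_STATS", True):
        start_time = time.time()

    response = await downloader.download(request)

    if start_time is not None:
        elapsed = time.time() - start_time
        downloader.logger.debug(f"Downloaded {request.url} in {elapsed:.3f}s")
    return response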
crawlo/filters/__init__.py
@@ -1,37 +1,154 @@
  #!/usr/bin/python
  # -*- coding:UTF-8 -*-
+ """
+ Crawlo Filters Module
+ ====================
+ Provides several request-deduplication filter implementations.
+
+ Filter types:
+ - MemoryFilter: efficient in-memory deduplication, suited to standalone mode
+ - AioRedisFilter: Redis-based distributed deduplication, suited to distributed mode
+ - MemoryFileFilter: memory plus file persistence, for scenarios that must survive restarts
+
+ Core interface:
+ - BaseFilter: base class for all filters
+ - requested(): the main method for checking whether a request is a duplicate
+ """
  from abc import ABC, abstractmethod
+ from typing import Optional

- from crawlo import Request
  from crawlo.utils.request import request_fingerprint


  class BaseFilter(ABC):
+     """
+     Base class for request-deduplication filters.
+
+     Provides a unified deduplication interface and statistics.
+     All filter implementations should inherit from this class.
+     """

-     def __init__(self, logger, stats, debug: bool):
+     def __init__(self, logger, stats, debug: bool = False):
+         """
+         Initialize the filter.
+
+         :param logger: logger instance
+         :param stats: statistics store
+         :param debug: whether to enable debug logging
+         """
          self.logger = logger
          self.stats = stats
          self.debug = debug
+         self._request_count = 0
+         self._duplicate_count = 0

      @classmethod
      def create_instance(cls, *args, **kwargs) -> 'BaseFilter':
          return cls(*args, **kwargs)

-     def requested(self, request: Request):
+     def requested(self, request) -> bool:
+         """
+         Check whether a request is a duplicate (the main interface).
+
+         :param request: request object
+         :return: True if duplicate, False if new
+         """
+         self._request_count += 1
          fp = request_fingerprint(request)
+
          if fp in self:
+             self._duplicate_count += 1
+             self.log_stats(request)
              return True
+
          self.add_fingerprint(fp)
          return False

      @abstractmethod
-     def add_fingerprint(self, fp) -> None:
+     def add_fingerprint(self, fp: str) -> None:
+         """
+         Add a request fingerprint (subclasses must implement).
+
+         :param fp: request fingerprint string
+         """
+         pass
+
+     @abstractmethod
+     def __contains__(self, item: str) -> bool:
+         """
+         Check whether a fingerprint exists (supports the `in` operator).
+
+         :param item: the fingerprint to check
+         :return: whether it already exists
+         """
          pass

-     def log_stats(self, request: Request) -> None:
+     def log_stats(self, request) -> None:
+         """
+         Record statistics.
+
+         :param request: the duplicate request object
+         """
          if self.debug:
              self.logger.debug(f'Filtered duplicate request: {request}')
          self.stats.inc_value(f'{self}/filtered_count')
+
+     def get_stats(self) -> dict:
+         """
+         Return filter statistics.
+
+         :return: statistics dict
+         """
+         return {
+             'total_requests': self._request_count,
+             'duplicate_requests': self._duplicate_count,
+             'unique_requests': self._request_count - self._duplicate_count,
+             'duplicate_rate': f"{self._duplicate_count / max(1, self._request_count) * 100:.2f}%"
+         }
+
+     def reset_stats(self) -> None:
+         """Reset the statistics"""
+         self._request_count = 0
+         self._duplicate_count = 0
+
+     def close(self) -> None:
+         """Close the filter and clean up resources"""
+         pass

      def __str__(self) -> str:
          return f'{self.__class__.__name__}'
+
+
+ # Export all available filters
+ __all__ = ['BaseFilter']
+
+ # Import the concrete implementations dynamically
+ try:
+     from .memory_filter import MemoryFilter, MemoryFileFilter
+     __all__.extend(['MemoryFilter', 'MemoryFileFilter'])
+ except ImportError:
+     MemoryFilter = None
+     MemoryFileFilter = None
+
+ try:
+     from .aioredis_filter import AioRedisFilter
+     __all__.append('AioRedisFilter')
+ except ImportError:
+     AioRedisFilter = None
+
+ # Convenience mapping from names to filter classes
+ FILTER_MAP = {
+     'memory': MemoryFilter,
+     'memory_file': MemoryFileFilter,
+     'redis': AioRedisFilter,
+     'aioredis': AioRedisFilter,  # alias
+ }
+
+ # Drop filters that failed to import
+ FILTER_MAP = {k: v for k, v in FILTER_MAP.items() if v is not None}
+
+ def get_filter_class(name: str):
+     """Look up a filter class by name"""
+     if name in FILTER_MAP:
+         return FILTER_MAP[name]
+     raise ValueError(f"Unknown filter type: {name}. Available types: {list(FILTER_MAP.keys())}")