crawlo 1.1.3-py3-none-any.whl → 1.1.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (115)
  1. crawlo/__init__.py +28 -1
  2. crawlo/__version__.py +1 -1
  3. crawlo/cleaners/__init__.py +61 -0
  4. crawlo/cleaners/data_formatter.py +226 -0
  5. crawlo/cleaners/encoding_converter.py +126 -0
  6. crawlo/cleaners/text_cleaner.py +233 -0
  7. crawlo/commands/startproject.py +117 -13
  8. crawlo/config.py +30 -0
  9. crawlo/config_validator.py +253 -0
  10. crawlo/core/engine.py +185 -11
  11. crawlo/core/scheduler.py +49 -78
  12. crawlo/crawler.py +6 -6
  13. crawlo/downloader/__init__.py +24 -0
  14. crawlo/downloader/aiohttp_downloader.py +8 -0
  15. crawlo/downloader/cffi_downloader.py +5 -0
  16. crawlo/downloader/hybrid_downloader.py +214 -0
  17. crawlo/downloader/playwright_downloader.py +403 -0
  18. crawlo/downloader/selenium_downloader.py +473 -0
  19. crawlo/extension/__init__.py +17 -10
  20. crawlo/extension/health_check.py +142 -0
  21. crawlo/extension/log_interval.py +27 -18
  22. crawlo/extension/log_stats.py +62 -24
  23. crawlo/extension/logging_extension.py +18 -9
  24. crawlo/extension/memory_monitor.py +105 -0
  25. crawlo/extension/performance_profiler.py +134 -0
  26. crawlo/extension/request_recorder.py +108 -0
  27. crawlo/filters/aioredis_filter.py +50 -12
  28. crawlo/middleware/proxy.py +26 -2
  29. crawlo/mode_manager.py +24 -19
  30. crawlo/network/request.py +30 -3
  31. crawlo/network/response.py +114 -25
  32. crawlo/pipelines/mongo_pipeline.py +81 -66
  33. crawlo/pipelines/mysql_pipeline.py +165 -43
  34. crawlo/pipelines/redis_dedup_pipeline.py +7 -3
  35. crawlo/queue/queue_manager.py +15 -2
  36. crawlo/queue/redis_priority_queue.py +144 -76
  37. crawlo/settings/default_settings.py +93 -121
  38. crawlo/subscriber.py +62 -37
  39. crawlo/templates/project/items.py.tmpl +1 -1
  40. crawlo/templates/project/middlewares.py.tmpl +73 -49
  41. crawlo/templates/project/pipelines.py.tmpl +51 -295
  42. crawlo/templates/project/settings.py.tmpl +93 -17
  43. crawlo/templates/project/settings_distributed.py.tmpl +120 -0
  44. crawlo/templates/project/settings_gentle.py.tmpl +95 -0
  45. crawlo/templates/project/settings_high_performance.py.tmpl +152 -0
  46. crawlo/templates/project/settings_simple.py.tmpl +69 -0
  47. crawlo/templates/spider/spider.py.tmpl +2 -38
  48. crawlo/tools/__init__.py +183 -0
  49. crawlo/tools/anti_crawler.py +269 -0
  50. crawlo/tools/authenticated_proxy.py +241 -0
  51. crawlo/tools/data_validator.py +181 -0
  52. crawlo/tools/date_tools.py +36 -0
  53. crawlo/tools/distributed_coordinator.py +387 -0
  54. crawlo/tools/retry_mechanism.py +221 -0
  55. crawlo/tools/scenario_adapter.py +263 -0
  56. crawlo/utils/__init__.py +29 -1
  57. crawlo/utils/batch_processor.py +261 -0
  58. crawlo/utils/date_tools.py +58 -1
  59. crawlo/utils/enhanced_error_handler.py +360 -0
  60. crawlo/utils/env_config.py +106 -0
  61. crawlo/utils/error_handler.py +126 -0
  62. crawlo/utils/performance_monitor.py +285 -0
  63. crawlo/utils/redis_connection_pool.py +335 -0
  64. crawlo/utils/redis_key_validator.py +200 -0
  65. crawlo-1.1.5.dist-info/METADATA +401 -0
  66. crawlo-1.1.5.dist-info/RECORD +185 -0
  67. tests/advanced_tools_example.py +276 -0
  68. tests/authenticated_proxy_example.py +237 -0
  69. tests/cleaners_example.py +161 -0
  70. tests/config_validation_demo.py +103 -0
  71. tests/date_tools_example.py +181 -0
  72. tests/dynamic_loading_example.py +524 -0
  73. tests/dynamic_loading_test.py +105 -0
  74. tests/env_config_example.py +134 -0
  75. tests/error_handling_example.py +172 -0
  76. tests/redis_key_validation_demo.py +131 -0
  77. tests/response_improvements_example.py +145 -0
  78. tests/test_advanced_tools.py +149 -0
  79. tests/test_all_redis_key_configs.py +146 -0
  80. tests/test_authenticated_proxy.py +142 -0
  81. tests/test_cleaners.py +55 -0
  82. tests/test_comprehensive.py +147 -0
  83. tests/test_config_validator.py +194 -0
  84. tests/test_date_tools.py +124 -0
  85. tests/test_dynamic_downloaders_proxy.py +125 -0
  86. tests/test_dynamic_proxy.py +93 -0
  87. tests/test_dynamic_proxy_config.py +147 -0
  88. tests/test_dynamic_proxy_real.py +110 -0
  89. tests/test_edge_cases.py +304 -0
  90. tests/test_enhanced_error_handler.py +271 -0
  91. tests/test_env_config.py +122 -0
  92. tests/test_error_handler_compatibility.py +113 -0
  93. tests/test_framework_env_usage.py +104 -0
  94. tests/test_integration.py +357 -0
  95. tests/test_item_dedup_redis_key.py +123 -0
  96. tests/test_parsel.py +30 -0
  97. tests/test_performance.py +328 -0
  98. tests/test_queue_manager_redis_key.py +177 -0
  99. tests/test_redis_connection_pool.py +295 -0
  100. tests/test_redis_key_naming.py +182 -0
  101. tests/test_redis_key_validator.py +124 -0
  102. tests/test_response_improvements.py +153 -0
  103. tests/test_simple_response.py +62 -0
  104. tests/test_telecom_spider_redis_key.py +206 -0
  105. tests/test_template_content.py +88 -0
  106. tests/test_template_redis_key.py +135 -0
  107. tests/test_tools.py +154 -0
  108. tests/tools_example.py +258 -0
  109. crawlo/core/enhanced_engine.py +0 -190
  110. crawlo-1.1.3.dist-info/METADATA +0 -635
  111. crawlo-1.1.3.dist-info/RECORD +0 -113
  112. {crawlo-1.1.3.dist-info → crawlo-1.1.5.dist-info}/WHEEL +0 -0
  113. {crawlo-1.1.3.dist-info → crawlo-1.1.5.dist-info}/entry_points.txt +0 -0
  114. {crawlo-1.1.3.dist-info → crawlo-1.1.5.dist-info}/top_level.txt +0 -0
  115. {examples → tests}/controlled_spider_example.py +0 -0
crawlo/__init__.py CHANGED
@@ -9,7 +9,21 @@ from crawlo.network.request import Request
 from crawlo.network.response import Response
 from crawlo.downloader import DownloaderBase
 from crawlo.middleware import BaseMiddleware
-
+from crawlo.utils import (
+    TimeUtils,
+    parse_time,
+    format_time,
+    time_diff,
+    to_timestamp,
+    to_datetime,
+    now,
+    to_timezone,
+    to_utc,
+    to_local,
+    from_timestamp_with_tz
+)
+from crawlo import cleaners
+from crawlo import tools
 
 # Version number: prefer reading it from the package metadata
 try:
@@ -31,5 +45,18 @@ __all__ = [
     'Response',
     'DownloaderBase',
     'BaseMiddleware',
+    'TimeUtils',
+    'parse_time',
+    'format_time',
+    'time_diff',
+    'to_timestamp',
+    'to_datetime',
+    'now',
+    'to_timezone',
+    'to_utc',
+    'to_local',
+    'from_timestamp_with_tz',
+    'cleaners',
+    'tools',
     '__version__',
 ]
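Net effect: the time helpers plus the new cleaners and tools subpackages become importable straight from the top-level crawlo namespace. A minimal usage sketch of the re-exports (not part of the diff; only the names are confirmed here, and the zero-argument call to now() is an assumption):

import crawlo

print(crawlo.__version__)   # "1.1.5"
current = crawlo.now()      # re-exported time helper; zero-arg call is assumed
# clean_text is defined in crawlo/cleaners/text_cleaner.py further down in this diff
print(crawlo.cleaners.clean_text("<p>hello&nbsp;world</p>"))  # "hello world"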
crawlo/__version__.py CHANGED
@@ -1 +1 @@
-__version__ = "1.1.3"
+__version__ = "1.1.5"
crawlo/cleaners/__init__.py ADDED
@@ -0,0 +1,61 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+"""
+# @Time   : 2025-09-10 22:00
+# @Author : crawl-coder
+# @Desc   : Data cleaning toolkit
+"""
+
+from .text_cleaner import (
+    TextCleaner,
+    remove_html_tags,
+    decode_html_entities,
+    remove_extra_whitespace,
+    remove_special_chars,
+    normalize_unicode,
+    clean_text,
+    extract_numbers,
+    extract_emails,
+    extract_urls
+)
+
+from .data_formatter import (
+    DataFormatter,
+    format_number,
+    format_currency,
+    format_percentage,
+    format_phone_number,
+    format_chinese_id_card,
+    capitalize_words
+)
+
+from .encoding_converter import (
+    EncodingConverter,
+    detect_encoding,
+    to_utf8,
+    convert_encoding
+)
+
+__all__ = [
+    "TextCleaner",
+    "DataFormatter",
+    "EncodingConverter",
+    "remove_html_tags",
+    "decode_html_entities",
+    "remove_extra_whitespace",
+    "remove_special_chars",
+    "normalize_unicode",
+    "clean_text",
+    "extract_numbers",
+    "extract_emails",
+    "extract_urls",
+    "format_number",
+    "format_currency",
+    "format_percentage",
+    "format_phone_number",
+    "format_chinese_id_card",
+    "capitalize_words",
+    "detect_encoding",
+    "to_utf8",
+    "convert_encoding"
+]
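Every name in __all__ is usable either via the utility classes or the module-level wrappers re-exported here. A minimal sketch of the package-level API (not part of the diff; expected outputs are inferred from the implementations reproduced below):

from crawlo.cleaners import clean_text, extract_emails, format_currency, to_utf8

snippet = '<div>Price:&nbsp;<b>1234.5</b>, contact dev@example.com</div>'
print(clean_text(snippet))      # 'Price: 1234.5, contact dev@example.com'
print(extract_emails(snippet))  # ['dev@example.com']
print(format_currency(1234.5))  # '¥1,234.50'
print(to_utf8('abc'.encode()))  # 'abc'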
crawlo/cleaners/data_formatter.py ADDED
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+"""
+# @Time   : 2025-09-10 22:00
+# @Author : crawl-coder
+# @Desc   : Data formatting utilities
+"""
+import re
+from typing import Any, Optional, Union
+from decimal import Decimal, InvalidOperation
+
+
+class DataFormatter:
+    """
+    Data formatting utility class providing assorted formatting helpers.
+    Particularly suited to the data-formatting needs of crawlers.
+    """
+
+    @staticmethod
+    def format_number(value: Any,
+                      precision: int = 2,
+                      thousand_separator: bool = False) -> Optional[str]:
+        """
+        Format a number.
+
+        :param value: numeric value
+        :param precision: number of decimal places
+        :param thousand_separator: whether to use thousands separators
+        :return: formatted number string
+        """
+        if value is None:
+            return None
+
+        try:
+            # Convert to Decimal to avoid floating-point precision issues
+            decimal_value = Decimal(str(value))
+
+            if thousand_separator:
+                # With thousands separators
+                formatted = f"{decimal_value:,.{precision}f}"
+            else:
+                # Without thousands separators
+                formatted = f"{decimal_value:.{precision}f}"
+
+            return formatted
+        except (ValueError, InvalidOperation):
+            return None
+
+    @staticmethod
+    def format_currency(value: Any,
+                        currency_symbol: str = "¥",
+                        precision: int = 2) -> Optional[str]:
+        """
+        Format a currency amount.
+
+        :param value: currency value
+        :param currency_symbol: currency symbol
+        :param precision: number of decimal places
+        :return: formatted currency string
+        """
+        formatted_number = DataFormatter.format_number(value, precision, thousand_separator=True)
+        if formatted_number is None:
+            return None
+
+        return f"{currency_symbol}{formatted_number}"
+
+    @staticmethod
+    def format_percentage(value: Any,
+                          precision: int = 2,
+                          multiply_100: bool = True) -> Optional[str]:
+        """
+        Format a percentage.
+
+        :param value: percentage value
+        :param precision: number of decimal places
+        :param multiply_100: whether to multiply by 100 (when the raw value is a fraction)
+        :return: formatted percentage string
+        """
+        if value is None:
+            return None
+
+        try:
+            decimal_value = Decimal(str(value))
+
+            if multiply_100:
+                decimal_value *= 100
+
+            formatted = f"{decimal_value:.{precision}f}%"
+            return formatted
+        except (ValueError, InvalidOperation):
+            return None
+
+    @staticmethod
+    def format_phone_number(phone: str,
+                            country_code: str = "+86",
+                            format_type: str = "international") -> Optional[str]:
+        """
+        Format a phone number.
+
+        :param phone: phone number
+        :param country_code: country code
+        :param format_type: format style ('international', 'domestic', 'plain')
+        :return: formatted phone number
+        """
+        if not isinstance(phone, str):
+            phone = str(phone)
+
+        # Strip all non-digit characters
+        digits = re.sub(r'\D', '', phone)
+
+        if not digits:
+            return None
+
+        # 11-digit Chinese mobile number
+        if len(digits) == 11 and digits.startswith('1'):
+            if format_type == "international":
+                return f"{country_code} {digits[:3]} {digits[3:7]} {digits[7:]}"
+            elif format_type == "domestic":
+                return f"{digits[:3]}-{digits[3:7]}-{digits[7:]}"
+            else:  # plain
+                return digits
+        else:
+            # Fall back to simple handling for everything else
+            if format_type == "international" and country_code:
+                return f"{country_code} {digits}"
+            else:
+                return digits
+
+    @staticmethod
+    def format_chinese_id_card(id_card: str) -> Optional[str]:
+        """
+        Format a Chinese ID card number (masking the middle section).
+
+        :param id_card: ID card number
+        :return: formatted ID card number
+        """
+        if not isinstance(id_card, str):
+            id_card = str(id_card)
+
+        # Remove spaces
+        id_card = id_card.replace(" ", "")
+
+        if len(id_card) == 18:
+            # 18-digit ID number
+            return f"{id_card[:6]}********{id_card[-4:]}"
+        elif len(id_card) == 15:
+            # 15-digit ID number
+            return f"{id_card[:6]}******{id_card[-3:]}"
+        else:
+            return None
+
+    @staticmethod
+    def capitalize_words(text: str,
+                         delimiter: str = " ",
+                         preserve_articles: bool = True) -> str:
+        """
+        Capitalize the first letter of each word.
+
+        :param text: text
+        :param delimiter: word delimiter
+        :param preserve_articles: whether to keep articles lowercase
+        :return: capitalized text
+        """
+        if not isinstance(text, str):
+            return str(text)
+
+        # Common articles, conjunctions, and prepositions
+        articles = {'a', 'an', 'the', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with', 'by'}
+
+        words = text.split(delimiter)
+        capitalized_words = []
+
+        for i, word in enumerate(words):
+            if not word:
+                capitalized_words.append(word)
+                continue
+
+            # The first and last words are always capitalized
+            if i == 0 or i == len(words) - 1 or not preserve_articles or word.lower() not in articles:
+                capitalized_words.append(word.capitalize())
+            else:
+                capitalized_words.append(word.lower())
+
+        return delimiter.join(capitalized_words)
+
+
+# ======================= Public interface =======================
+
+def format_number(value: Any,
+                  precision: int = 2,
+                  thousand_separator: bool = False) -> Optional[str]:
+    """Format a number"""
+    return DataFormatter.format_number(value, precision, thousand_separator)
+
+
+def format_currency(value: Any,
+                    currency_symbol: str = "¥",
+                    precision: int = 2) -> Optional[str]:
+    """Format a currency amount"""
+    return DataFormatter.format_currency(value, currency_symbol, precision)
+
+
+def format_percentage(value: Any,
+                      precision: int = 2,
+                      multiply_100: bool = True) -> Optional[str]:
+    """Format a percentage"""
+    return DataFormatter.format_percentage(value, precision, multiply_100)
+
+
+def format_phone_number(phone: str,
+                        country_code: str = "+86",
+                        format_type: str = "international") -> Optional[str]:
+    """Format a phone number"""
+    return DataFormatter.format_phone_number(phone, country_code, format_type)
+
+
+def format_chinese_id_card(id_card: str) -> Optional[str]:
+    """Format a Chinese ID card number"""
+    return DataFormatter.format_chinese_id_card(id_card)
+
+
+def capitalize_words(text: str,
+                     delimiter: str = " ",
+                     preserve_articles: bool = True) -> str:
+    """Capitalize the first letter of each word"""
+    return DataFormatter.capitalize_words(text, delimiter, preserve_articles)
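Illustrative calls against the implementation above, with expected results traced from the code (a sketch, not package documentation):

from crawlo.cleaners.data_formatter import (
    format_number, format_percentage, format_phone_number,
    format_chinese_id_card, capitalize_words,
)

print(format_number(1234567.891, precision=2, thousand_separator=True))  # '1,234,567.89'
print(format_percentage(0.1234))                                     # '12.34%' (multiplied by 100)
print(format_phone_number('138 1234 5678', format_type='domestic'))  # '138-1234-5678'
print(format_chinese_id_card('110101199001011234'))                  # '110101********1234'
print(capitalize_words('the lord of the rings'))                     # 'The Lord of the Rings'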
crawlo/cleaners/encoding_converter.py ADDED
@@ -0,0 +1,126 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+"""
+# @Time   : 2025-09-10 22:00
+# @Author : crawl-coder
+# @Desc   : Encoding conversion utilities
+"""
+try:
+    import chardet
+    HAS_CHARDET = True
+except ImportError:
+    HAS_CHARDET = False
+from typing import Optional, Union
+
+
+class EncodingConverter:
+    """
+    Encoding conversion utility class providing assorted conversion helpers.
+    Particularly suited to handling web page content fetched in different encodings.
+    """
+
+    @staticmethod
+    def detect_encoding(data: Union[str, bytes]) -> Optional[str]:
+        """
+        Detect the encoding of the given data.
+
+        :param data: data (str or bytes)
+        :return: detected encoding
+        """
+        if isinstance(data, str):
+            # Strings are reported as UTF-8 directly
+            return 'utf-8'
+
+        if not isinstance(data, bytes):
+            return None
+
+        if HAS_CHARDET:
+            try:
+                # Detect the encoding with chardet
+                result = chardet.detect(data)
+                return result['encoding']
+            except Exception:
+                return None
+        else:
+            # Without chardet, return None
+            return None
+
+    @staticmethod
+    def to_utf8(data: Union[str, bytes], source_encoding: Optional[str] = None) -> Optional[str]:
+        """
+        Convert data to a UTF-8 string.
+
+        :param data: data (str or bytes)
+        :param source_encoding: source encoding (auto-detected when None)
+        :return: UTF-8 string
+        """
+        if isinstance(data, str):
+            # Already a string; assume it is UTF-8
+            return data
+
+        if not isinstance(data, bytes):
+            return None
+
+        try:
+            if source_encoding is None:
+                # Auto-detect the encoding
+                source_encoding = EncodingConverter.detect_encoding(data)
+                if source_encoding is None:
+                    # Detection failed; try common encodings
+                    for encoding in ['utf-8', 'gbk', 'gb2312', 'latin1']:
+                        try:
+                            decoded = data.decode(encoding)
+                            return decoded
+                        except UnicodeDecodeError:
+                            continue
+                    return None
+                else:
+                    # Decode with the detected encoding
+                    return data.decode(source_encoding)
+
+            # Decode with the caller-supplied encoding
+            return data.decode(source_encoding)
+        except Exception:
+            return None
+
+    @staticmethod
+    def convert_encoding(data: Union[str, bytes],
+                         source_encoding: Optional[str] = None,
+                         target_encoding: str = 'utf-8') -> Optional[bytes]:
+        """
+        Convert between encodings.
+
+        :param data: data (str or bytes)
+        :param source_encoding: source encoding (auto-detected when None)
+        :param target_encoding: target encoding
+        :return: converted bytes
+        """
+        # First convert to a UTF-8 string
+        utf8_str = EncodingConverter.to_utf8(data, source_encoding)
+        if utf8_str is None:
+            return None
+
+        try:
+            # Then encode to the target encoding
+            return utf8_str.encode(target_encoding)
+        except Exception:
+            return None
+
+
+# ======================= Public interface =======================
+
+def detect_encoding(data: Union[str, bytes]) -> Optional[str]:
+    """Detect the encoding of the given data"""
+    return EncodingConverter.detect_encoding(data)
+
+
+def to_utf8(data: Union[str, bytes], source_encoding: Optional[str] = None) -> Optional[str]:
+    """Convert data to a UTF-8 string"""
+    return EncodingConverter.to_utf8(data, source_encoding)
+
+
+def convert_encoding(data: Union[str, bytes],
+                     source_encoding: Optional[str] = None,
+                     target_encoding: str = 'utf-8') -> Optional[bytes]:
+    """Convert between encodings"""
+    return EncodingConverter.convert_encoding(data, source_encoding, target_encoding)
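A sketch of the conversion flow. Detection quality depends on chardet being installed and on the sample size, so the encoding name in the first output is only indicative:

from crawlo.cleaners.encoding_converter import detect_encoding, to_utf8, convert_encoding

raw = '编码转换示例:你好,世界'.encode('gbk')  # GBK bytes, as often served by Chinese sites
print(detect_encoding(raw))  # e.g. 'GB2312' with chardet; None when chardet is absent
print(to_utf8(raw))          # decoded text; without chardet it falls back to utf-8/gbk/gb2312/latin1
print(convert_encoding(raw, target_encoding='utf-8'))  # the same text re-encoded as UTF-8 bytes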
crawlo/cleaners/text_cleaner.py ADDED
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+"""
+# @Time   : 2025-09-10 22:00
+# @Author : crawl-coder
+# @Desc   : Text cleaning utilities
+"""
+import re
+import html
+from typing import Optional, Union, List
+import unicodedata
+
+
+class TextCleaner:
+    """
+    Text cleaning utility class providing assorted cleaning helpers.
+    Particularly suited to cleaning web page content in crawlers.
+    """
+
+    @staticmethod
+    def remove_html_tags(text: str) -> str:
+        """
+        Remove HTML tags.
+
+        :param text: text containing HTML tags
+        :return: text with HTML tags removed
+        """
+        if not isinstance(text, str):
+            return str(text)
+
+        # Strip HTML tags with a regular expression
+        clean_text = re.sub(r'<[^>]+>', '', text)
+        return clean_text.strip()
+
+    @staticmethod
+    def decode_html_entities(text: str) -> str:
+        """
+        Decode HTML entities.
+
+        :param text: text containing HTML entities
+        :return: decoded text
+        """
+        if not isinstance(text, str):
+            return str(text)
+
+        return html.unescape(text)
+
+    @staticmethod
+    def remove_extra_whitespace(text: str) -> str:
+        """
+        Remove redundant whitespace (spaces, tabs, newlines, etc.).
+
+        :param text: text
+        :return: cleaned text
+        """
+        if not isinstance(text, str):
+            return str(text)
+
+        # Collapse runs of whitespace into a single space
+        clean_text = re.sub(r'\s+', ' ', text)
+        return clean_text.strip()
+
+    @staticmethod
+    def remove_special_chars(text: str, chars: str = '') -> str:
+        """
+        Remove special characters.
+
+        :param text: text
+        :param chars: special characters to remove
+        :return: cleaned text
+        """
+        if not isinstance(text, str):
+            return str(text)
+
+        # Remove common special characters
+        special_chars = r'[^\w\s\u4e00-\u9fff' + chars + r']'
+        clean_text = re.sub(special_chars, '', text)
+        return clean_text
+
+    @staticmethod
+    def normalize_unicode(text: str) -> str:
+        """
+        Normalize Unicode characters.
+
+        :param text: text
+        :return: normalized text
+        """
+        if not isinstance(text, str):
+            return str(text)
+
+        return unicodedata.normalize('NFKC', text)
+
+    @staticmethod
+    def clean_text(text: str,
+                   remove_html: bool = True,
+                   decode_entities: bool = True,
+                   remove_whitespace: bool = True,
+                   remove_special: bool = False,
+                   normalize: bool = True) -> str:
+        """
+        Combined text cleaning.
+
+        :param text: raw text
+        :param remove_html: whether to remove HTML tags
+        :param decode_entities: whether to decode HTML entities
+        :param remove_whitespace: whether to remove redundant whitespace
+        :param remove_special: whether to remove special characters
+        :param normalize: whether to normalize Unicode characters
+        :return: cleaned text
+        """
+        if not isinstance(text, str):
+            text = str(text)
+
+        if not text:
+            return text
+
+        # Clean in order
+        if remove_html:
+            text = TextCleaner.remove_html_tags(text)
+
+        if decode_entities:
+            text = TextCleaner.decode_html_entities(text)
+
+        if normalize:
+            text = TextCleaner.normalize_unicode(text)
+
+        if remove_whitespace:
+            text = TextCleaner.remove_extra_whitespace(text)
+
+        if remove_special:
+            text = TextCleaner.remove_special_chars(text)
+
+        return text
+
+    @staticmethod
+    def extract_numbers(text: str) -> List[str]:
+        """
+        Extract numbers from text.
+
+        :param text: text
+        :return: list of numbers
+        """
+        if not isinstance(text, str):
+            return []
+
+        # Match integers and decimals
+        numbers = re.findall(r'-?\d+\.?\d*', text)
+        return numbers
+
+    @staticmethod
+    def extract_emails(text: str) -> List[str]:
+        """
+        Extract e-mail addresses from text.
+
+        :param text: text
+        :return: list of e-mail addresses
+        """
+        if not isinstance(text, str):
+            return []
+
+        # Match e-mail addresses
+        emails = re.findall(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b', text)
+        return emails
+
+    @staticmethod
+    def extract_urls(text: str) -> List[str]:
+        """
+        Extract URLs from text.
+
+        :param text: text
+        :return: list of URLs
+        """
+        if not isinstance(text, str):
+            return []
+
+        # Match URLs
+        urls = re.findall(
+            r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',
+            text
+        )
+        return urls
+
+
+# ======================= Public interface =======================
+
+def remove_html_tags(text: str) -> str:
+    """Remove HTML tags"""
+    return TextCleaner.remove_html_tags(text)
+
+
+def decode_html_entities(text: str) -> str:
+    """Decode HTML entities"""
+    return TextCleaner.decode_html_entities(text)
+
+
+def remove_extra_whitespace(text: str) -> str:
+    """Remove redundant whitespace"""
+    return TextCleaner.remove_extra_whitespace(text)
+
+
+def remove_special_chars(text: str, chars: str = '') -> str:
+    """Remove special characters"""
+    return TextCleaner.remove_special_chars(text, chars)
+
+
+def normalize_unicode(text: str) -> str:
+    """Normalize Unicode characters"""
+    return TextCleaner.normalize_unicode(text)
+
+
+def clean_text(text: str,
+               remove_html: bool = True,
+               decode_entities: bool = True,
+               remove_whitespace: bool = True,
+               remove_special: bool = False,
+               normalize: bool = True) -> str:
+    """Combined text cleaning"""
+    return TextCleaner.clean_text(text, remove_html, decode_entities, remove_whitespace, remove_special, normalize)
+
+
+def extract_numbers(text: str) -> List[str]:
+    """Extract numbers"""
+    return TextCleaner.extract_numbers(text)
+
+
+def extract_emails(text: str) -> List[str]:
+    """Extract e-mail addresses"""
+    return TextCleaner.extract_emails(text)
+
+
+def extract_urls(text: str) -> List[str]:
+    """Extract URLs"""
+    return TextCleaner.extract_urls(text)
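A sketch of a typical cleaning pass over scraped HTML, using only helpers defined above (expected outputs traced from the code):

from crawlo.cleaners.text_cleaner import clean_text, extract_numbers, extract_urls

raw = '<p>Visit https://example.com &amp; save   20.5%!</p>'
print(clean_text(raw))       # 'Visit https://example.com & save 20.5%!'
print(extract_numbers(raw))  # ['20.5']
print(extract_urls(raw))     # ['https://example.com']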