re-common 2.0.0__py3-none-any.whl → 10.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- re_common/v2/baselibrary/tools/list_tools.py +0 -0
- re_common/v2/baselibrary/tools/search_hash_tools.py +33 -0
- re_common/v2/baselibrary/tools/text_matcher.py +223 -0
- re_common/v2/baselibrary/utils/BusinessStringUtil.py +2 -2
- re_common/v2/baselibrary/utils/author_smi.py +308 -0
- re_common/v2/baselibrary/utils/string_clear.py +15 -1
- re_common/v2/baselibrary/utils/stringutils.py +36 -1
- {re_common-2.0.0.dist-info → re_common-10.0.0.dist-info}/METADATA +1 -1
- {re_common-2.0.0.dist-info → re_common-10.0.0.dist-info}/RECORD +12 -8
- {re_common-2.0.0.dist-info → re_common-10.0.0.dist-info}/LICENSE +0 -0
- {re_common-2.0.0.dist-info → re_common-10.0.0.dist-info}/WHEEL +0 -0
- {re_common-2.0.0.dist-info → re_common-10.0.0.dist-info}/top_level.txt +0 -0
re_common/v2/baselibrary/tools/list_tools.py
File without changes
re_common/v2/baselibrary/tools/search_hash_tools.py
@@ -0,0 +1,33 @@
+from typing import List
+
+import jieba
+from datasketch import MinHash
+
+
+def tokenize(text: str, stopwords=None) -> List[str]:
+    """
+    Tokenize and remove stopwords
+    """
+    if stopwords is None:
+        stopwords = []
+    words = jieba.lcut(text)
+    # Count single-character tokens: words jieba cannot segment get split into individual characters
+    one_char_size = len([i for i in words if len(i) == 1])
+    all_size = len(words)
+    # If the share of single-character tokens is too high, just split on whitespace instead
+    if all_size != 0 and one_char_size / all_size > 0.6:
+        words = [i for i in text.split() if i.strip()]
+
+    # Filter out stopwords and empty tokens
+    words = [w for w in words if w not in stopwords and w.strip()]
+    return words
+
+
+def create_minhash(words: List[str], num_perm=128) -> MinHash:
+    """
+    Create a MinHash for a list of tokens
+    """
+    minhash = MinHash(num_perm=num_perm)
+    for word in words:
+        minhash.update(word.encode("utf-8"))
+    return minhash
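
Together the two helpers reduce a text to a token list and a MinHash signature, so two texts can be compared by their estimated Jaccard similarity. A minimal usage sketch, assuming jieba and datasketch are installed and the module is imported from the path listed in the RECORD entry:

    from re_common.v2.baselibrary.tools.search_hash_tools import create_minhash, tokenize

    m1 = create_minhash(tokenize("北京是中国的首都"))
    m2 = create_minhash(tokenize("中国的首都是北京"))
    print(m1.jaccard(m2))  # estimated Jaccard similarity of the two token sets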
re_common/v2/baselibrary/tools/text_matcher.py
@@ -0,0 +1,223 @@
+import jieba
+import re
+from typing import List, Dict, Tuple, Set, Optional, Union
+from datasketch import MinHash, MinHashLSH
+
+
+class TextMatcher:
+    def __init__(
+        self,
+        threshold: float = 0.5,
+        num_perm: int = 128,
+        is_raw_texts=True,
+        stopwords_path: Optional[str] = None,
+        user_dict_path: Optional[str] = None,
+
+    ):
+        """
+        Initialize the text matcher
+
+        Args:
+            threshold: LSH similarity threshold
+            num_perm: number of MinHash permutations
+            stopwords_path: path to a stopwords file
+            user_dict_path: path to a user-defined dictionary
+        """
+        self.threshold = threshold
+        self.num_perm = num_perm
+        self.lsh = MinHashLSH(threshold=threshold, num_perm=num_perm)
+        # self.minhashes: Dict[str, MinHash] = {}
+        self.raw_texts: Dict[str, str] = {}
+        self.is_raw_texts = is_raw_texts
+        self.doc_counter = 0
+
+        # Load stopwords
+        self.stopwords: Set[str] = set()
+        if stopwords_path:
+            self.load_stopwords(stopwords_path)
+
+        # Load the user dictionary
+        if user_dict_path:
+            jieba.load_userdict(user_dict_path)
+
+    def load_stopwords(self, stopwords_path: str) -> None:
+        """Load stopwords"""
+        with open(stopwords_path, "r", encoding="utf-8") as f:
+            self.stopwords = set(line.strip() for line in f)
+
+    def preprocess_text(self, text: str) -> str:
+        """
+        Preprocess the text
+        """
+        # Convert to lowercase
+        text = text.lower()
+        # Remove special characters
+        text = re.sub(r"[^\w\s\u4e00-\u9fff]", "", text)
+        # Collapse extra whitespace
+        text = re.sub(r"\s+", " ", text).strip()
+        return text
+
+    def tokenize(self, text: str) -> List[str]:
+        """
+        Tokenize and remove stopwords
+        """
+        words = jieba.lcut(text)
+        one_char_size = len([i for i in words if len(i) == 1])
+        all_size = len(words)
+        if all_size != 0 and one_char_size / all_size > 0.6:
+            words = [i for i in text.split() if i.strip()]
+
+        # Filter out stopwords and empty tokens
+        words = [w for w in words if w not in self.stopwords and w.strip()]
+        return words
+
+    def create_minhash(self, words: List[str]) -> MinHash:
+        """
+        Create a MinHash for a list of tokens
+        """
+        minhash = MinHash(num_perm=self.num_perm)
+        for word in words:
+            minhash.update(word.encode("utf-8"))
+        return minhash
+
+    def add_document(self, text: str, doc_id: Optional[str] = None) -> str:
+        """
+        Add a document to the index
+
+        Args:
+            text: document text
+            doc_id: document ID (optional)
+
+        Returns:
+            doc_id: the document ID
+        """
+        if doc_id is None:
+            doc_id = f"doc_{self.doc_counter}"
+            self.doc_counter += 1
+
+        # Preprocess and tokenize
+        processed_text = self.preprocess_text(text)
+        words = self.tokenize(processed_text)
+
+        # Create the MinHash
+        minhash = self.create_minhash(words)
+        if self.is_raw_texts:
+            # Store the raw text and MinHash
+            self.raw_texts[doc_id] = text
+        # self.minhashes[doc_id] = minhash
+
+        # Add to the LSH index
+        self.lsh.insert(doc_id, minhash)
+
+        return doc_id
+
+    def batch_add_documents(self, texts: Dict[str, str]) -> None:
+        """
+        Add documents in batch
+
+        Args:
+            texts: a dict of {doc_id: text}
+        """
+        for doc_id, text in texts.items():
+            self.add_document(text, doc_id)
+
+    def create_query_minhash(self, query: str):
+
+        # Preprocess the query text
+        processed_query = self.preprocess_text(query)
+        query_words = self.tokenize(processed_query)
+        # print(query_words)
+        query_minhash = self.create_minhash(query_words)
+        return query_minhash
+
+    def find_similar(self, query_minhash: MinHash, return_similarities: bool = False) -> Union[
+        List[str], List[Tuple[str, float]]]:
+        """
+        Find similar documents
+
+        Args:
+            query: the query text
+            return_similarities: whether to return similarity scores
+
+        Returns:
+            If return_similarities is True, returns [(doc_id, similarity), ...]
+            Otherwise returns [doc_id, ...]
+        """
+
+        # Use LSH to find the candidate set
+        similar_docs = self.lsh.query(query_minhash)
+
+        # if return_similarities:
+        #     # Compute the exact Jaccard similarity
+        #     results = []
+        #     for doc_id in similar_docs:
+        #         similarity = query_minhash.jaccard(self.minhashes[doc_id])
+        #         results.append((doc_id, similarity))
+        #     # Sort by similarity in descending order
+        #     return sorted(results, key=lambda x: x[1], reverse=True)
+
+        return similar_docs
+
+    def get_text(self, doc_id: str) -> Optional[str]:
+        """Get the raw text"""
+        if self.is_raw_texts:
+            return self.raw_texts.get(doc_id)
+        raise Exception("没有开启存储")  # raw-text storage is not enabled
+
+    def remove_document(self, doc_id: str) -> bool:
+        """
+        Delete a document
+
+        Returns:
+            bool: whether the deletion succeeded
+        """
+        # if doc_id not in self.minhashes:
+        #     return False
+
+        self.lsh.remove(doc_id)
+        # del self.minhashes[doc_id]
+        if self.is_raw_texts:
+            del self.raw_texts[doc_id]
+        return True
+
+    def clear(self) -> None:
+        """Clear all data"""
+        self.lsh = MinHashLSH(threshold=self.threshold, num_perm=self.num_perm)
+        # self.minhashes.clear()
+        self.raw_texts.clear()
+        self.doc_counter = 0
+
+
+if __name__ == "__main__":
+    # Create a matcher instance
+    matcher = TextMatcher(
+        threshold=0.1,  # similarity threshold
+        num_perm=128,  # number of MinHash permutations
+    )
+
+    # Add a single document
+    doc_id = matcher.add_document(
+        "北京是中国的首都"
+    )
+
+    # Add documents in batch
+    docs = {"doc1": "北京是一座现代化的大都市", "doc2": "上海是中国最大的城市", "doc3": "中国的首都是北京"}
+    matcher.batch_add_documents(docs)
+
+    # Find similar documents (without similarity scores)
+    similar_docs = matcher.find_similar("北京首都")
+    print("相似文档ID:", similar_docs)
+
+    # Find similar documents (with similarity scores)
+    similar_docs_with_scores = matcher.find_similar("北京首都", return_similarities=True)
+    print("相似文档ID和分数:", similar_docs_with_scores)
+
+    # Get the raw text
+    for doc_id, score in similar_docs_with_scores:
+        print(f"文档 {doc_id}: {matcher.get_text(doc_id)} (相似度: {score:.2f})")
+
+    # Delete a document
+    matcher.remove_document("doc1")
+
+    # Clear all data
+    matcher.clear()
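
As added, find_similar expects a MinHash rather than a raw string, so the __main__ demo above would need to build the query signature with create_query_minhash first; return_similarities also currently has no effect, since the exact-Jaccard branch is commented out. A sketch of that query path, under the same assumptions as the demo:

    matcher = TextMatcher(threshold=0.1, num_perm=128)
    matcher.batch_add_documents({"doc1": "北京是一座现代化的大都市", "doc3": "中国的首都是北京"})

    query_minhash = matcher.create_query_minhash("北京首都")
    print(matcher.find_similar(query_minhash))  # candidate doc_ids returned by the LSH index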
re_common/v2/baselibrary/utils/BusinessStringUtil.py
@@ -49,12 +49,12 @@ def clean_organ_postcode(organ)
     return format_organ.strip()
 
 
-
 def get_first_organ(organ):
     if not organ:
         return ""
     organ_list = organ.strip().split(";")
     for organ_one in organ_list:
+        # Clean the postcode
         organ_one = clean_organ_postcode(organ_one)
         if organ_one:
             return organ_one
@@ -62,7 +62,7 @@ def get_first_organ(organ)
     return ""
 
 
-def get_first_author(author:
+def get_first_author(author: str) -> str:
     if not author:
         return ""
     au_list = author.strip().split(";")
re_common/v2/baselibrary/utils/author_smi.py
@@ -0,0 +1,308 @@
+import re
+import string
+
+import regex
+import unicodedata
+from rapidfuzz._utils import setupPandas, is_none
+from rapidfuzz.distance import Jaro
+
+"""
+Author length-ratio distribution: most pairs fall between 1 and 2
+1-2 675092763
+2-3 49335191
+3-4 440848
+4-5 9953
+The remaining buckets are only in the hundreds, tens, or single digits and can be ignored
+If the ratio is greater than 5, reduce the score heavily
+3-4 and 4-5 are reduced in two tiers, with 3-4 reduced the least
+1-3 is not reduced
+"""
+
+additional_chars = '‑–‐’·.—'
+extended_punctuation = string.punctuation + additional_chars
+
+
+def get_diacritic_variant(char1):
+    # Normalize the characters to a standard Unicode form
+    normalized_char1 = unicodedata.normalize('NFD', char1)
+
+    # Get the base characters (diacritics removed)
+    base_char1 = ''.join(c for c in normalized_char1 if unicodedata.category(c) != 'Mn')
+
+    # Used to check whether the base characters are the same
+    return base_char1
+
+
+def detect_other_languages(text):
+    # Match every character that is not Chinese, English, or a digit
+    pattern = r'[^\u4E00-\u9FFFa-zA-Z0-9\s.,!?;:\'\"()‑\-–—‐’·˜.]'
+
+    # Search with the regular expression
+    matches = re.findall(pattern, text)
+
+    # If anything matches, the text contains characters from other scripts
+    return bool(matches)
+
+
+def extract_initials(text):
+    # Split the string on whitespace
+    words = text.split()
+
+    # Take the first letter of each word and uppercase it
+    initials = ''.join(word[0].upper() for word in words)
+
+    return initials
+
+
+def is_contained(str1, str2):
+    # Check whether one string contains the other
+    return str1 in str2 or str2 in str1
+
+
+# Return True if list1 contains list2
+def is_contained_list(list1, list2):
+    # Check that list1 has at least as many occurrences of each element as list2
+    for item in list2:
+        if list2.count(item) > list1.count(item):
+            return False
+    return True
+
+
+def check_common_elements_by_length_rank(list1, list2):
+    # Get the intersection of the two lists
+    set1 = set(list1)
+    set2 = set(list2)
+
+    common_elements = set1 & set2  # take the intersection
+
+    if not common_elements:
+        return False
+
+    # Determine the shorter list
+    short_list = list1 if len(list1) < len(list2) else list2
+
+    # Sort the shorter list by string length
+    sorted_short_list = sorted(short_list, key=len)
+
+    for word in common_elements:
+        # Get the word's length rank within the shorter list
+        length_rank = sorted_short_list.index(word) + 1  # +1 because list indices start at 0
+        # Skip single letters
+        if len(word) == 1:
+            continue
+
+        if length_rank / len(sorted_short_list) > 0.5:
+            # A long token matched exactly
+            return True
+
+    return False
+
+
+def remove_punctuation(text):
+    # 20241226: strip the custom symbol set
+    text = regex.sub("[\\p{P}¥+=˛`$<¸´~^¥≤℃×■¨°>|ⅰⅱⅲⅳⅴⅵⅶⅹⅺⅻ]", "", text.lower())
+    # text = text.translate(str.maketrans('', '', extended_punctuation))
+    return text
+
+
+def space_punctuation(text):
+    # Replace punctuation with spaces
+    return text.translate(str.maketrans(extended_punctuation, ' ' * len(extended_punctuation), ''))
+
+
+def custom_rstrip(s):
+    # Strip specific trailing substrings, removed in this order
+    s = s.strip()
+    if s.endswith("."):
+        s = s[:-1]  # drop the trailing "."
+        s = s.strip()
+    if s.endswith("jr"):
+        s = s[:-2]  # drop the trailing "jr"
+        s = s.strip()
+    if s.endswith(","):
+        s = s[:-1]  # drop the trailing ","
+        s = s.strip()
+
+    return s
+
+
+def deal_str_first(s1):
+    # Do a first pass of cleanup on the data
+    s1 = s1.replace("’", "")
+    return s1
+
+
+def AuthorRatio(
+    s1,
+    s2,
+    *,
+    processor=None,
+    score_cutoff=None,
+    is_delete_jr=True,
+):
+    # Needed for the None checks
+    setupPandas()
+    # If either value is empty there is no similarity
+    if is_none(s1) or is_none(s2):
+        return 0
+
+    # Optional string preprocessor
+    if processor is not None:
+        s1 = processor(s1)
+        s2 = processor(s2)
+
+    # If either string is empty after processing, return 0
+    if not s1 or not s2:
+        return 0
+
+    s1 = get_diacritic_variant(s1)
+    s2 = get_diacritic_variant(s2)
+    # Hoisted out here to save computation, though memory vs. compute may need rebalancing later
+    s1_punc = remove_punctuation(s1)
+    s2_punc = remove_punctuation(s2)
+    s1_punc_split = s1_punc.split()
+    s2_punc_split = s2_punc.split()
+
+    def compare_strings(s1_punc, s2_punc):
+        # Remove whitespace from the strings
+        cleaned_s1 = re.sub(r'\s+', '', s1_punc)
+        cleaned_s2 = re.sub(r'\s+', '', s2_punc)
+
+        # If the two strings are equal, return "equal"
+        if cleaned_s1 == cleaned_s2:
+            return "equal"
+        # If one string contains the other, return "subset"
+        elif cleaned_s1 in cleaned_s2 or cleaned_s2 in cleaned_s1:
+            return "subset"
+        # Otherwise return "unrelated"
+        else:
+            return "unrelated"
+
+    # If the token sets are equal after stripping punctuation, it is a 100% author match; this mainly guards against reordered names
+    if len(s1_punc_split) == len(s2_punc_split) and set(s1_punc_split) == set(s2_punc_split):
+        return 1
+
+    # If one side is only missing words, treat it as a valid containment; this happens with abbreviated names
+    if is_contained_list(s1_punc_split, s2_punc_split) or is_contained_list(s2_punc_split, s1_punc_split):
+        return 0.98
+
+    rus = compare_strings(s1_punc, s2_punc)
+    # If the strings are identical after removing punctuation and whitespace, the authors match: “Hoorani, H. R.” -> 'Hoorani, HR'
+    if rus == "equal":
+        return 1
+
+    # In Western names "jr" means junior; remove its influence here, controlled by the parameter
+    if is_delete_jr:
+        s1_n = custom_rstrip(s1.lower())
+        s1 = s1[:len(s1_n)]
+        s2_n = custom_rstrip(s2.lower())
+        s2 = s2[:len(s2_n)]
+
+    # The forward direction handles pairs like Liao, Zhan -> Liao Z., but the reverse direction can reintroduce the error above
+    if len(s1_punc_split) == len(s2_punc_split) and rus == "subset":
+        if len(s1_punc_split[-1]) == 1 or len(s2_punc_split[-1]) == 1:
+            if s1_punc_split[0] == s2_punc_split[0] and s1_punc_split[-1][:1] == s2_punc_split[-1][:1]:
+                return 1
+        # return 0.96  # With equal word counts and containment, pairs like Li Li and Li Liang would be judged identical, so no full score here
+
+    # Collapse runs of spaces into a single space with a regular expression
+    l1 = re.sub(r'\s+', ' ', space_punctuation(s1.replace("'", "")).strip()).strip().split()
+    l2 = re.sub(r'\s+', ' ', space_punctuation(s2.replace("'", "")).strip()).strip().split()
+
+    def is_same_or_initials_match(l1, l2):
+        """
+        Check whether two token lists are identical, or whether their initials match.
+        bool: True if the tokens are identical or their initials match; otherwise False.
+        """
+
+        # Iterate over l1 and l2 in parallel with zip()
+        for i1, i2 in zip(l1, l2):
+            # If the two tokens match case-insensitively, move on to the next pair
+            if i1.lower() == i2.lower():
+                continue
+
+            # If one of the tokens has length 1 (a single letter), check whether the initials match
+            if len(i1) == 1 or len(i2) == 1:
+                # Compare their initials (case-insensitive)
+                if i1[0].upper() == i2[0].upper():
+                    continue
+                else:
+                    return False  # the initials differ, so return False
+
+            # If none of the conditions above held, the tokens do not match; return False
+            return False
+
+        # If the loop finished without returning False, the two lists match; return True
+        return True
+
+    if len(l1[-1]) != 1 and l1[-1].isupper():
+        t_str = l1[-1]
+        l1 = l1[:-1]
+        l1.extend(list(t_str))
+    if len(l2[-1]) != 1 and l2[-1].isupper():
+        t_str = l2[-1]
+        l2 = l2[:-1]
+        l2.extend(list(t_str))
+
+    # If the lengths match and the abbreviations are the words' initials, the two names agree, e.g. "María M.Martorell", "Martorell, María M."
+    if len(l1) == len(l2) and (is_same_or_initials_match(l1, l2) or set(l1) == set(l2)):
+        return 1
+
+    ##############################################################
+    # The cases above are enumerated explicitly; below is the similarity calculation for everything else
+    ##############################################################
+
+    # Default score_cutoff to 0
+    if score_cutoff is None:
+        score_cutoff = 0
+
+    len1 = len(s1)
+    len2 = len(s2)
+    # Divide the longer length by the shorter to get the length ratio
+    len_ratio = len1 / len2 if len1 > len2 else len2 / len1
+
+    # Compute the normalized Indel similarity; for ratios < score_cutoff, return 0.
+    end_ratio = normal_end_ratio = Jaro.normalized_similarity(s1, s2)
+
+    # The author length-ratio distribution needs study to decide which ratios are small and which are large
+    if len_ratio > 1.5 and len_ratio < 3:
+        # Compute a linearly decreasing reduction factor
+        # when len_ratio = 1.5, reduction_factor = 1.0
+        # when len_ratio = 3.0, reduction_factor = 0.9
+        reduction_factor = 1.0 - (len_ratio - 1.5) * (0.1 / 1.5)
+        end_ratio = end_ratio * reduction_factor
+    if len_ratio > 3 and len_ratio < 4:  # should be reduced slightly
+        end_ratio = end_ratio * 0.9
+    if len_ratio > 4 and len_ratio < 5:  # should be reduced moderately
+        end_ratio = end_ratio * 0.8
+    if len_ratio > 5:  # should be reduced
+        end_ratio = end_ratio * 0.7
+
+    # Diacritic boosting was already handled above
+    # # Boost for non-English, non-Chinese scripts when compared against English
+    # if any([detect_other_languages(s1), detect_other_languages(s2)]) and not all([detect_other_languages(s1),
+    #                                                                              detect_other_languages(s2)]):
+    #     # should boost the score
+    #     end_ratio = end_ratio * 1.1
+
+    # Boost when the initials match
+    # if is_contained(extract_initials(s1), extract_initials(s2)):
+    if is_contained_list([i[:1] for i in l1], [i[:1] for i in l2]):
+        # should boost the score
+        end_ratio = end_ratio * 1.05
+    else:
+        end_ratio = end_ratio * 0.9
+
+    if len(l1) != len(l2):
+        end_ratio = end_ratio * 0.92
+
+    # Consider where the shared token ranks by length within the shorter name; a shared abbreviation should not boost the score
+    if check_common_elements_by_length_rank(l1, l2) and len_ratio > 1.5:
+        # should boost the score
+        end_ratio = end_ratio * 1.1
+
+    if l1[0] != l2[0]:
+        end_ratio = end_ratio * Jaro.normalized_similarity(l1[0], l2[0])
+
+    # If the raw string similarity is high, pull the score up; otherwise pull it down
+    return min(end_ratio, 1) * 0.5 + normal_end_ratio * 0.5
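
A brief usage sketch of the new scorer (assuming rapidfuzz and regex are installed; the fallback scores depend on the Jaro similarity, so only the exact-match branches are annotated with fixed values):

    from re_common.v2.baselibrary.utils.author_smi import AuthorRatio

    print(AuthorRatio("Hoorani, H. R.", "Hoorani, HR"))   # 1 - identical once punctuation and spaces are stripped
    print(AuthorRatio("Liao, Zhan", "Liao Z."))           # 1 - same surname plus a matching initial
    print(AuthorRatio("María M. Martorell", "Martorell, María M."))  # 1 - same tokens in a different order
    print(AuthorRatio("Li Li", "Wang Wei"))               # low score from the weighted Jaro fallback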
re_common/v2/baselibrary/utils/string_clear.py
@@ -14,6 +14,10 @@ class StringClear(object):
         self.obj_str = ''
         return self
 
+    def to_str(self):
+        self.obj_str = str(self.obj_str)
+        return self
+
     def qj_to_bj(self):
         # Full-width to half-width
         self.obj_str = qj2bj(self.obj_str)
@@ -22,6 +26,7 @@ class StringClear(object):
     def bj_to_qj(self):
         # Half-width to full-width
         self.obj_str = bj2qj(self.obj_str)
+        return self
 
     def lower(self):
         self.obj_str = self.obj_str.lower()
@@ -64,6 +69,7 @@ class StringClear(object):
     def remove_diacritics(self):
         # Strip diacritics, converting to plain letters
         self.obj_str = get_diacritic_variant(self.obj_str)
+        return self
 
     def remove_brackets(self):
         # Remove the content inside square brackets
@@ -81,4 +87,12 @@ class StringClear(object):
 
 def rel_clear(str_obj):
     # Cleaning rules customized for the merged data
-    return StringClear(str_obj)
+    return (StringClear(str_obj)
+            .None_to_str()  # convert None to str, guarding against empty objects
+            .to_str()  # guard against other input types
+            .qj_to_bj()  # full-width to half-width
+            .remove_special_chars()  # remove special characters, keeping only letters, digits, spaces and CJK; \w already covers all Unicode letters, and the underscore _ is kept
+            .collapse_spaces()  # collapse runs of spaces into one
+            .lower()  # lowercase
+            .get_str()  # get the str
+            .strip())  # strip surrounding whitespace
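
With to_str added and the conversion steps now returning self, rel_clear becomes a single chained call; a rough sketch of its effect (the exact output depends on the remove_special_chars and collapse_spaces rules defined elsewhere in the class):

    from re_common.v2.baselibrary.utils.string_clear import rel_clear

    print(rel_clear("  Ｈello,,   World！ "))  # e.g. "hello world" after width folding, character filtering, and lowercasing
    print(rel_clear(None))                      # None is handled by None_to_str() before the string steps run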
re_common/v2/baselibrary/utils/stringutils.py
@@ -1,3 +1,6 @@
+import re
+
+import regex
 import unicodedata
 
 
@@ -57,4 +60,36 @@ def get_diacritic_variant(char1)
     base_char1 = ''.join(c for c in normalized_char1 if unicodedata.category(c) != 'Mn')
 
     # Used to check whether the base characters are the same
-    return base_char1
+    return base_char1
+
+def get_alphabetic_ratio(text: str) -> float:
+    if not text:
+        return 0
+
+    text = re.sub(r'\d+', '', text)
+
+    # Regular expression matching alphabetic scripts (Latin, Greek, Cyrillic, Arabic, etc.)
+    alphabetic_pattern = (
+        r"[\u0041-\u005A\u0061-\u007A"  # Latin letters (A-Z, a-z)
+        r"\u00C0-\u00FF"  # accented Latin letters (À-ÿ)
+        r"\u0080–\u00FF"  # Latin-1 Supplement
+        r"\u0100–\u017F"  # Latin Extended-A
+        r"\u1E00-\u1EFF"  # Latin Extended Additional
+        r"\u0180-\u024F"  # Latin Extended-B
+        r"\u2C60-\u2C7F"  # Latin Extended-C
+        r"\uA720-\uA7FF"  # Latin Extended-D
+        r"\uAB30-\uAB6F"  # Latin Extended-E
+        r"]"
+    )
+
+    # Filter down to letter characters with a regex
+    clean_text = regex.sub(r"[^\p{L}]", "", text)
+
+    if len(clean_text) == 0:
+        return 1.0
+
+    # Match all alphabetic-script characters
+    alphabetic_chars = re.findall(alphabetic_pattern, clean_text)
+
+    # Return the proportion of alphabetic-script characters
+    return len(alphabetic_chars) / len(clean_text)
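
The new helper reports what fraction of a string's letter characters fall in the Latin ranges above, after digits are stripped; a small usage sketch, assuming regex is installed:

    from re_common.v2.baselibrary.utils.stringutils import get_alphabetic_ratio

    print(get_alphabetic_ratio("Hello world 123"))  # 1.0 - every remaining letter is Latin
    print(get_alphabetic_ratio("你好 world"))        # ~0.71 - five Latin letters out of seven letters total
    print(get_alphabetic_ratio(""))                 # 0 - empty input short-circuits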
{re_common-2.0.0.dist-info → re_common-10.0.0.dist-info}/RECORD
@@ -167,15 +167,19 @@ re_common/v2/baselibrary/s3object/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeR
 re_common/v2/baselibrary/s3object/baseboto3.py,sha256=mXuIFx99pnrPGQ4LJCZwlN1HLbaU-OWLwck0cVzW6hc,11203
 re_common/v2/baselibrary/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 re_common/v2/baselibrary/tools/dict_tools.py,sha256=HW-YZOUhv5GMzFsF-ArLfDoszui1K3_M7IiRIe4VEXA,909
+re_common/v2/baselibrary/tools/list_tools.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+re_common/v2/baselibrary/tools/search_hash_tools.py,sha256=o-PNjmcYDkfyiR75Jci_9sSn4cGi_F9jPCIrwYdnb1U,1013
+re_common/v2/baselibrary/tools/text_matcher.py,sha256=F4WtLO-b7H6V9TIvOntCD9ZXSQP_KijPuLLYcLPtrKQ,7021
 re_common/v2/baselibrary/tools/unionfind_tools.py,sha256=VYHZZPXwBYljsm7TjV1B6iCgDn3O3btzNf9hMvQySVU,2965
-re_common/v2/baselibrary/utils/BusinessStringUtil.py,sha256=
+re_common/v2/baselibrary/utils/BusinessStringUtil.py,sha256=dxrWO800wElZM_4aKolUHSPBYZlxqzXukE4M-LZ13jA,2644
 re_common/v2/baselibrary/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+re_common/v2/baselibrary/utils/author_smi.py,sha256=_P3I5JXvxHqNNWUwhAyHiJuBFiC0tXvGD8-_HxNiuEU,11051
 re_common/v2/baselibrary/utils/basedict.py,sha256=tSV85pARe8ZQDY77_h_heS81EWwcgJW076DcA9WQyjY,1161
 re_common/v2/baselibrary/utils/basehdfs.py,sha256=NVV5Q0OMPlM_zTrs9ZDoPJv29GQv5wi9-AP1us5dBrQ,4651
 re_common/v2/baselibrary/utils/json_cls.py,sha256=dHOkWafG9lbQDoub9cbDwT2fDjMKtblQnjFLeA4hECA,286
 re_common/v2/baselibrary/utils/string_bool.py,sha256=f5qYdKvTufxmfSsxXN41WFLV--vCwDWU2LeQPbDvKZY,178
-re_common/v2/baselibrary/utils/string_clear.py,sha256=
-re_common/v2/baselibrary/utils/stringutils.py,sha256=
+re_common/v2/baselibrary/utils/string_clear.py,sha256=LqGvv-UZnsVwiDBN3-PdzDUTfWlAsKsvKlkXqySI0eE,3244
+re_common/v2/baselibrary/utils/stringutils.py,sha256=lhDvRL60S6gjhU4D0nfk2Y-c25IyYdYOD0TMoCx-huE,2658
 re_common/vip/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 re_common/vip/base_step_process.py,sha256=VXXiNj0I5CpzXIMCgOPU86bzDJkSBkUS-9CpZIl_GOk,205
 re_common/vip/baseencodeid.py,sha256=nERoe89ueFM52bG7xwJdflcZHk6T2RQQKbc5uUZc3RM,3272
@@ -202,8 +206,8 @@ re_common/vip/title/transform/TransformRegulationTitleToZt.py,sha256=LKRdIsWKues
 re_common/vip/title/transform/TransformStandardTitleToZt.py,sha256=-fCKAbSBzXVyQDCE61CalvR9E_QzQMA08QOO_NePFNI,5563
 re_common/vip/title/transform/TransformThesisTitleToZt.py,sha256=QS-uV0cQrpUFAcKucuJQ9Ue2VRQH-inmfn_X3IplfRo,5488
 re_common/vip/title/transform/__init__.py,sha256=m83-CWyRq_VHPYHaALEQlmXrkTdrZ3e4B_kCfBYE-uc,239
-re_common-
-re_common-
-re_common-
-re_common-
-re_common-
+re_common-10.0.0.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+re_common-10.0.0.dist-info/METADATA,sha256=C8xtx6EWq_g7ScVYYKNZRwq7IuZ_z2esfPwhztPshE0,581
+re_common-10.0.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+re_common-10.0.0.dist-info/top_level.txt,sha256=_H9H23zoLIalm1AIY_KYTVh_H0ZnmjxQIxsvXtLv45o,10
+re_common-10.0.0.dist-info/RECORD,,
{re_common-2.0.0.dist-info → re_common-10.0.0.dist-info}/LICENSE
File without changes

{re_common-2.0.0.dist-info → re_common-10.0.0.dist-info}/WHEEL
File without changes

{re_common-2.0.0.dist-info → re_common-10.0.0.dist-info}/top_level.txt
File without changes