PgsFile 0.4.6__py3-none-any.whl → 0.4.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of PgsFile might be problematic.
- PgsFile/PgsFile.py +52 -1
- PgsFile/__init__.py +1 -0
- PgsFile/models/prompts/7. SentAlign prompt.txt +35 -0
- {PgsFile-0.4.6.dist-info → PgsFile-0.4.8.dist-info}/METADATA +3 -3
- {PgsFile-0.4.6.dist-info → PgsFile-0.4.8.dist-info}/RECORD +8 -7
- {PgsFile-0.4.6.dist-info → PgsFile-0.4.8.dist-info}/LICENSE +0 -0
- {PgsFile-0.4.6.dist-info → PgsFile-0.4.8.dist-info}/WHEEL +0 -0
- {PgsFile-0.4.6.dist-info → PgsFile-0.4.8.dist-info}/top_level.txt +0 -0
PgsFile/PgsFile.py
CHANGED
@@ -4090,4 +4090,55 @@ def markdown_to_python_object(data):
         return ast.literal_eval(code_str)
     except Exception:
         return code_str.strip()
-
+
+
+import math
+from collections import defaultdict, Counter
+
+def tfidf_keyword_extraction(documents, top_percent=(0.0, 0.10)):
+    """
+    Extract keywords from a small set of tokenized documents using TF-IDF.
+
+    Parameters
+    ----------
+    documents : list of list of str
+        Corpus represented as tokenized documents.
+    top_percent : tuple of float
+        Range of percentage (low, high) to select top keyword candidates.
+
+    Returns
+    -------
+    full_list : list of tuple
+        All (term, tf-idf score) pairs sorted by score in descending order.
+    candidates : list of tuple
+        Keyword candidates from the top_percent range.
+    """
+    log = math.log  # local reference for speed
+
+    # Step 1: Compute IDF
+    total_docs = len(documents)
+    doc_freq = defaultdict(int)
+    for doc in documents:
+        for term in set(doc):
+            doc_freq[term] += 1
+    idf = {term: log((total_docs + 1) / (df + 1)) + 1 for term, df in doc_freq.items()}
+
+    # Step 2: Compute TF-IDF
+    tfidf_scores = {}
+    for doc in documents:
+        total_terms = len(doc)
+        term_counts = Counter(doc)
+        for term, count in term_counts.items():
+            tfidf_scores[term] = (count / total_terms) * idf[term]  # overwrite as before
+
+    # Step 3: Sort full list
+    full_list = sorted(tfidf_scores.items(), key=lambda x: x[1], reverse=True)
+
+    # Step 4: Extract candidates based on percentage range
+    n_terms = len(full_list)
+    low_cut = int(n_terms * top_percent[0])
+    high_cut = int(n_terms * top_percent[1])
+    candidates = full_list[low_cut:high_cut]  # slice range
+
+    return full_list, candidates
+
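The new function uses the smoothed IDF variant idf(t) = ln((N + 1) / (df(t) + 1)) + 1, and note that tfidf_scores[term] is overwritten document by document, so a term appearing in several documents keeps the score from the last document containing it (as the inline comment "overwrite as before" indicates). A minimal usage sketch, assuming the package is installed; the sample documents are made up, not from the package:

from PgsFile import tfidf_keyword_extraction

# Three tiny tokenized documents (hypothetical sample data).
docs = [
    ["corpus", "linguistics", "uses", "corpus", "data"],
    ["machine", "translation", "uses", "parallel", "corpus"],
    ["keyword", "extraction", "ranks", "terms", "by", "weight"],
]

# full_list holds every (term, score) pair, highest score first;
# candidates is the slice inside the top_percent range (top 10% by default).
full_list, candidates = tfidf_keyword_extraction(docs, top_percent=(0.0, 0.10))
print(full_list[:3])
print(candidates)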
PgsFile/__init__.py
CHANGED
@@ -53,6 +53,7 @@ from .PgsFile import word_lemmatize, word_POS, word_NER
 from .PgsFile import extract_noun_phrases, get_LLMs_prompt, extract_keywords_en, extract_keywords_en_be21
 from .PgsFile import extract_dependency_relations, extract_dependency_relations_full
 from .PgsFile import predict_category
+from .PgsFile import tfidf_keyword_extraction
 
 # 8. Maths
 from .PgsFile import len_rows, check_empty_cells
PgsFile/models/prompts/7. SentAlign prompt.txt
ADDED
@@ -0,0 +1,35 @@
+You are an expert in aligning Chinese and English sentences.
+Align Chinese sentences with their corresponding English translations from the input text.
+
+Priority:
+• First attempt 1–1 alignment (one Chinese sentence ↔ one English sentence) whenever possible.
+• If no exact 1–1 match is possible, use 1–many (one Chinese sentence ↔ multiple English sentences) or many–1 (multiple Chinese sentences ↔ one English sentence).
+• Ensure every Chinese sentence is included in the alignment—none should be skipped.
+
+Input:
+• src: Chinese passage
+• tgt: English passage
+
+Output format (Python list):
+A Python list of aligned pairs without extra explanation:
+[aligned_src_sent(s)1, aligned_tgt_sent(s)1]
+
+Examples:
+Example 1 (1–1):
+src: "两年以后,大兴安岭。"
+tgt: "Two years later, the Greater Khingan Mountains."
+output: ["两年以后,大兴安岭。", "Two years later, the Greater Khingan Mountains."]
+Example 2 (1–many):
+src: "他沉默了一会儿,然后笑了。"
+tgt: "He was silent for a moment. Then he smiled."
+output: [["他沉默了一会儿,然后笑了。"], ["He was silent for a moment.", "Then he smiled."]]
+Example 3 (many–1):
+src: "风起了。天色渐暗。"
+tgt: "The wind picked up as the sky darkened."
+output: [["风起了。", "天色渐暗。"], ["The wind picked up as the sky darkened."]]
+
+Task:
+Using the priority rules above, align the following src and tgt sentences accurately:
+Source Text: {src}
+Target Text: {tgt}
+Output: ["",""]
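Because the template exposes {src} and {tgt} placeholders, it can be filled with plain str.format before being sent to an LLM. A minimal sketch, assuming the prompt file is read directly from the package's models/prompts folder (the path handling is illustrative; the package's own get_LLMs_prompt loader may differ):

# Fill the SentAlign template's {src}/{tgt} slots (path shown is illustrative).
with open("PgsFile/models/prompts/7. SentAlign prompt.txt", encoding="utf-8") as f:
    template = f.read()

prompt = template.format(
    src="风起了。天色渐暗。",
    tgt="The wind picked up as the sky darkened.",
)

# The model's bracketed reply is a Python-style list, so it can be parsed
# back into a Python object, e.g. with the package's markdown_to_python_object.
print(prompt)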
{PgsFile-0.4.6.dist-info → PgsFile-0.4.8.dist-info}/METADATA
CHANGED
@@ -1,7 +1,7 @@
 Metadata-Version: 2.1
 Name: PgsFile
-Version: 0.4.6
-Summary: This module simplifies Python package management, script execution, file handling, web scraping, and multimedia downloads. The module supports (LLM-based) NLP tasks such as OCR, tokenization, lemmatization, POS tagging, NER, ATE, dependency parsing, MDD, WSD, LIWC, and
+Version: 0.4.8
+Summary: This module simplifies Python package management, script execution, file handling, web scraping, and multimedia downloads. The module supports (LLM-based) NLP tasks such as OCR, tokenization, lemmatization, POS tagging, NER, ATE, dependency parsing, MDD, WSD, LIWC, MIP analysis and Chinese-English sentence alignment. It also generates word lists, and plots data, aiding literary students. Ideal for scraping data, cleaning text, and analyzing language, it offers user-friendly tools to streamline workflows.
 Home-page: https://mp.weixin.qq.com/s/lWMkYDWQMjBJNKY2vMYTpw
 Author: Pan Guisheng
 Author-email: panguisheng@sufe.edu.cn
@@ -34,7 +34,7 @@ Key Features:
 4. **Data Storage:** Write and append data to text files, Excel, JSON, and JSON lines.
 5. **File and Folder Processing:** Manage file paths, create directories, move or copy files, and search for files with specific keywords.
 6. **Data Cleaning:** Clean text, handle punctuation, remove stopwords, convert Markdown strings into Python objects, and prepare data for analysis, utilizing valuable corpora and dictionaries such as CET-4/6 vocabulary, BE21 and BNC-COCA word lists.
-7. **NLP:** Perform OCR, word tokenization, lemmatization, POS tagging, NER, dependency parsing, ATE, MDD, WSD, LIWC,
+7. **NLP:** Perform OCR, word tokenization, lemmatization, POS tagging, NER, dependency parsing, ATE, MDD, WSD, LIWC, MIP analysis, and Chinese-English sentence alignment using prepared LLM prompts.
 8. **Math Operations:** Format numbers, convert decimals to percentages, and validate data.
 9. **Visualization:** Process images (e.g., make white pixels transparent, resize images) and manage fonts for rendering text.
{PgsFile-0.4.6.dist-info → PgsFile-0.4.8.dist-info}/RECORD
CHANGED
@@ -1,5 +1,5 @@
-PgsFile/PgsFile.py,sha256=
-PgsFile/__init__.py,sha256=
+PgsFile/PgsFile.py,sha256=3iyfFE5THgwwz0_MtylUlMn-72gsRaCbUxdm9LcI8nQ,169903
+PgsFile/__init__.py,sha256=mWZ8dfTlzeCfTHFlWyHY3vCwqyM4_YQBGPd6vBoNGso,3674
 PgsFile/Corpora/Idioms/English_Idioms_8774.txt,sha256=qlsP0yI_XGECBRiPZuLkGZpdasc77sWSKexANu7v8_M,175905
 PgsFile/Corpora/Monolingual/Chinese/People's Daily 20130605/Raw/00000000.txt,sha256=SLGGSMSb7Ff1RoBstsTW3yX2wNZpqEUchFNpcI-mrR4,1513
 PgsFile/Corpora/Monolingual/Chinese/People's Daily 20130605/Raw/00000001.txt,sha256=imOa6UoCOIZoPXT4_HNHgCUJtd4FTIdk2FZNHNBgJyg,3372
@@ -2591,8 +2591,9 @@ PgsFile/models/prompts/3. ICTCLAS Prompt.txt,sha256=VFn6N_JViAbyy9NazA8gjX6SGo5m
 PgsFile/models/prompts/4. OCR prompt.txt,sha256=YxUQ2IlE52k0fcBnGsuOHqWAmfiEmIu6iRz5zecQ8dk,260
 PgsFile/models/prompts/5. ATE prompt.txt,sha256=5wu0gGlsV7DI0LruYM3-uAC6brppyYD0IoiFVjMqm5Y,1553
 PgsFile/models/prompts/6. ATE3 prompt.txt,sha256=VnaXpPa6BgZHUcm8PxmP_qgU-8xEoTB3XcBqjwCUy_g,1254
-PgsFile
-PgsFile-0.4.
-PgsFile-0.4.
-PgsFile-0.4.
-PgsFile-0.4.
+PgsFile/models/prompts/7. SentAlign prompt.txt,sha256=hXpqqC-CAgo8EytkJ0MaLhevLefALazWriY-ew39jxs,1537
+PgsFile-0.4.8.dist-info/LICENSE,sha256=cE5c-QToSkG1KTUsU8drQXz1vG0EbJWuU4ybHTRb5SE,1138
+PgsFile-0.4.8.dist-info/METADATA,sha256=kd3UY3kgL0HMBGe16hqzpLeCjc-A4wdKYGshb7FBecw,3065
+PgsFile-0.4.8.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
+PgsFile-0.4.8.dist-info/top_level.txt,sha256=028hCfwhF3UpfD6X0rwtWpXI1RKSTeZ1ALwagWaSmX8,8
+PgsFile-0.4.8.dist-info/RECORD,,
{PgsFile-0.4.6.dist-info → PgsFile-0.4.8.dist-info}/LICENSE
File without changes
{PgsFile-0.4.6.dist-info → PgsFile-0.4.8.dist-info}/WHEEL
File without changes
{PgsFile-0.4.6.dist-info → PgsFile-0.4.8.dist-info}/top_level.txt
File without changes