PgsFile 0.4.3__py3-none-any.whl → 0.4.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- PgsFile/PgsFile.py +27 -0
- PgsFile/__init__.py +1 -0
- PgsFile/models/prompts/5. ATE prompt.txt +28 -0
- PgsFile/models/prompts/6. ATE3 prompt.txt +18 -0
- {PgsFile-0.4.3.dist-info → PgsFile-0.4.5.dist-info}/METADATA +4 -4
- {PgsFile-0.4.3.dist-info → PgsFile-0.4.5.dist-info}/RECORD +9 -7
- {PgsFile-0.4.3.dist-info → PgsFile-0.4.5.dist-info}/LICENSE +0 -0
- {PgsFile-0.4.3.dist-info → PgsFile-0.4.5.dist-info}/WHEEL +0 -0
- {PgsFile-0.4.3.dist-info → PgsFile-0.4.5.dist-info}/top_level.txt +0 -0
PgsFile/PgsFile.py
CHANGED
|
@@ -4064,3 +4064,30 @@ def convert_image_to_url(image_path: str) -> str:
|
|
|
4064
4064
|
|
|
4065
4065
|
return image_url
|
|
4066
4066
|
|
|
4067
|
import ast


def markdown_to_python_object(data):
    """
    Convert an LLM-style Markdown answer into the equivalent Python object.

    Parameters
    ----------
    data : Any
        Either an already-built Python object (list, dict, tuple, str, ...)
        or a string whose payload may be wrapped in a fenced code block
        (``` ... ```, ```python ... ``` or ```json ... ```).

    Returns
    -------
    Any
        *data* unchanged when it is not a string; otherwise the parsed
        Python object, or the stripped text when parsing fails.
    """
    # Anything that is not a string is assumed to already be a Python object.
    if not isinstance(data, str):
        return data

    # Unwrap the first fenced code block.  LLMs commonly label the fence
    # as ```python or ```json; both tags are skipped so only the payload
    # is captured (previously a ```json tag leaked into the captured text).
    fence = re.search(r"```(?:python|json)?\s*(.*?)\s*```", data, re.DOTALL)
    payload = fence.group(1) if fence else data

    # Safe parsing: Python literals first, then JSON (covers true/false/null
    # emitted by JSON-speaking models), finally fall back to the plain text.
    try:
        return ast.literal_eval(payload)
    except Exception:
        pass
    try:
        import json  # local import: only needed on this fallback path
        return json.loads(payload)
    except Exception:
        return payload.strip()
PgsFile/__init__.py
CHANGED
|
@@ -42,6 +42,7 @@ from .PgsFile import replace_chinese_punctuation_with_english
|
|
|
42
42
|
from .PgsFile import replace_english_punctuation_with_chinese
|
|
43
43
|
from .PgsFile import clean_list, clean_text, clean_text_with_abbreviations, clean_line_with_abbreviations
|
|
44
44
|
from .PgsFile import extract_chinese_punctuation, generate_password, sort_strings_with_embedded_numbers
|
|
45
|
+
from .PgsFile import markdown_to_python_object
|
|
45
46
|
|
|
46
47
|
# 7. NLP (natural language processing)
|
|
47
48
|
from .PgsFile import strQ2B_raw, strQ2B_words
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
You are an excellent automatic term extraction (ATE) system. Your task is to identify and extract terms from a given text based on a specified domain. Terms should be domain-specific and should not include named entities. I will provide you with the domain and the sentence from which you need to extract the terms. Please follow the output format provided in the examples.
|
|
2
|
+
|
|
3
|
+
What are the terms in the following text? Terms should not include named entities.
|
|
4
|
+
Output Format: ["list of terms present"]
|
|
5
|
+
If no terms are presented, keep it an empty list: []
|
|
6
|
+
|
|
7
|
+
EXAMPLES:
|
|
8
|
+
|
|
9
|
+
Example 1:
|
|
10
|
+
Sentence: "Treatment of anemia in patients with heart disease: a clinical practice guideline from the American College of Physicians."
|
|
11
|
+
Domain: Heart failure
|
|
12
|
+
Output: ["anemia", "patients", "heart disease", "clinical practice guideline", "Physicians"]
|
|
13
|
+
|
|
14
|
+
Example 2:
|
|
15
|
+
Sentence: "Recommendation 2: ACP recommends against the use of erythropoiesis-stimulating agents in patients with mild to moderate anemia and congestive heart failure or coronary heart disease."
|
|
16
|
+
Domain: Heart failure
|
|
17
|
+
Output: ["erythropoiesis-stimulating agents", "patients", "anemia", "congestive heart failure", "coronary heart disease"]
|
|
18
|
+
|
|
19
|
+
Example 3:
|
|
20
|
+
Sentence: "Moreover, there is yet to be established a common consensus being used in current assays."
|
|
21
|
+
Domain: Heart failure
|
|
22
|
+
Output: []
|
|
23
|
+
|
|
24
|
+
Now, please extract the terms from the following sentence:
|
|
25
|
+
|
|
26
|
+
Sentence: "{sentence}"
|
|
27
|
+
Domain: "{domain}"
|
|
28
|
+
Output: ["list of terms present"]
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
Extract terms from the input text and classify each term into one of the following categories based on their domain- and lexicon-specificity:
|
|
2
|
+
• Specific_Term: Domain-specific and lexicon-specific (known mainly by experts, highly relevant to the domain)
|
|
3
|
+
• Common_Term: Domain-specific but not lexicon-specific (known by laypersons, still relevant to the domain)
|
|
4
|
+
• OOD_Term: Lexicon-specific but not domain-specific (known mainly by experts, but not relevant to the domain)
|
|
5
|
+
|
|
6
|
+
Terms should not include named entities.
|
|
7
|
+
Output Format (a list of python-tuple): [("2-vessel cad", "Specific_Term"), ("aortic valve", "Common_Term"), ("p-value", "OOD_Term")]
|
|
8
|
+
If no terms are presented, keep it an empty list: []
|
|
9
|
+
|
|
10
|
+
Examples (in the domain of heart failure):
|
|
11
|
+
• "ejection fraction" → "Specific_Term": laypersons generally do not know what it means, and it is strongly related to the domain.
|
|
12
|
+
• "heart" → "Common_Term": relevant to the domain and understandable by the general public.
|
|
13
|
+
• "p-value" → "OOD_Term": expert-level term, but not domain-specific to heart failure.
|
|
14
|
+
|
|
15
|
+
Now, please extract the terms from the following sentence:
|
|
16
|
+
|
|
17
|
+
Sentence: "{sentence}"
|
|
18
|
+
Output: ["list of terms present"]
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: PgsFile
|
|
3
|
-
Version: 0.4.3
|
|
4
|
-
Summary: This module simplifies Python package management, script execution, file handling, web scraping, and multimedia downloads. The module supports (LLM-based) NLP tasks such as OCR, tokenization, lemmatization, POS tagging, NER,
|
|
3
|
+
Version: 0.4.5
|
|
4
|
+
Summary: This module simplifies Python package management, script execution, file handling, web scraping, and multimedia downloads. The module supports (LLM-based) NLP tasks such as OCR, tokenization, lemmatization, POS tagging, NER, ATE, dependency parsing, MDD, WSD, LIWC, and MIP analysis. It also generates word lists, and plots data, aiding literary students. Ideal for scraping data, cleaning text, and analyzing language, it offers user-friendly tools to streamline workflows.
|
|
5
5
|
Home-page: https://mp.weixin.qq.com/s/lWMkYDWQMjBJNKY2vMYTpw
|
|
6
6
|
Author: Pan Guisheng
|
|
7
7
|
Author-email: panguisheng@sufe.edu.cn
|
|
@@ -33,8 +33,8 @@ Key Features:
|
|
|
33
33
|
3. **Data Retrieval:** Extract data from various file formats like text, JSON, TSV, Excel, XML, and HTML (both online and offline).
|
|
34
34
|
4. **Data Storage:** Write and append data to text files, Excel, JSON, and JSON lines.
|
|
35
35
|
5. **File and Folder Processing:** Manage file paths, create directories, move or copy files, and search for files with specific keywords.
|
|
36
|
-
6. **Data Cleaning:** Clean text, handle punctuation, remove stopwords, and prepare data for analysis, utilizing valuable corpora and dictionaries such as CET-4/6 vocabulary, BE21 and BNC-COCA word lists.
|
|
37
|
-
7. **NLP:** Perform OCR, word tokenization, lemmatization, POS tagging, NER, dependency parsing,
|
|
36
|
+
6. **Data Cleaning:** Clean text, handle punctuation, remove stopwords, convert Markdown strings into Python objects, and prepare data for analysis, utilizing valuable corpora and dictionaries such as CET-4/6 vocabulary, BE21 and BNC-COCA word lists.
|
|
37
|
+
7. **NLP:** Perform OCR, word tokenization, lemmatization, POS tagging, NER, dependency parsing, ATE, MDD, WSD, LIWC, and MIP analysis using prepared LLM prompts.
|
|
38
38
|
8. **Math Operations:** Format numbers, convert decimals to percentages, and validate data.
|
|
39
39
|
9. **Visualization:** Process images (e.g., make white pixels transparent, resize images) and manage fonts for rendering text.
|
|
40
40
|
|
|
@@ -1,5 +1,5 @@
|
|
|
1
|
-
PgsFile/PgsFile.py,sha256=
|
|
2
|
-
PgsFile/__init__.py,sha256=
|
|
1
|
+
PgsFile/PgsFile.py,sha256=lsccmRjB-vHPTeAsGG97CYPjTiTrVnbiiSHCqaGc6sk,168231
|
|
2
|
+
PgsFile/__init__.py,sha256=0yJbrdpue45cageZqejZAsjdWXcEg-Cs2NT7elOF1rQ,3627
|
|
3
3
|
PgsFile/Corpora/Idioms/English_Idioms_8774.txt,sha256=qlsP0yI_XGECBRiPZuLkGZpdasc77sWSKexANu7v8_M,175905
|
|
4
4
|
PgsFile/Corpora/Monolingual/Chinese/People's Daily 20130605/Raw/00000000.txt,sha256=SLGGSMSb7Ff1RoBstsTW3yX2wNZpqEUchFNpcI-mrR4,1513
|
|
5
5
|
PgsFile/Corpora/Monolingual/Chinese/People's Daily 20130605/Raw/00000001.txt,sha256=imOa6UoCOIZoPXT4_HNHgCUJtd4FTIdk2FZNHNBgJyg,3372
|
|
@@ -2589,8 +2589,10 @@ PgsFile/models/prompts/1. MIP prompt.txt,sha256=4lHlHmleayRytqr1n9jtt6vn1rQvyf4B
|
|
|
2589
2589
|
PgsFile/models/prompts/2. WSD prompt.txt,sha256=o-ZFtCRUCDrXgm040WTQch9v2Y_r2SIlrZaquilJjgQ,2348
|
|
2590
2590
|
PgsFile/models/prompts/3. ICTCLAS Prompt.txt,sha256=VFn6N_JViAbyy9NazA8gjX6SGo5mgBcZOf95aC9JB84,592
|
|
2591
2591
|
PgsFile/models/prompts/4. OCR prompt.txt,sha256=YxUQ2IlE52k0fcBnGsuOHqWAmfiEmIu6iRz5zecQ8dk,260
|
|
2592
|
-
PgsFile
|
|
2593
|
-
PgsFile
|
|
2594
|
-
PgsFile-0.4.
|
|
2595
|
-
PgsFile-0.4.
|
|
2596
|
-
PgsFile-0.4.
|
|
2592
|
+
PgsFile/models/prompts/5. ATE prompt.txt,sha256=ZJo9BhbbUf7CVXi2Gb5DAsV_2PGgzly2I7ze0grCo2k,1486
|
|
2593
|
+
PgsFile/models/prompts/6. ATE3 prompt.txt,sha256=BcefVd-RM1_IjSH1t9JfRUz6qxIoYrDxORLlysT3H70,1209
|
|
2594
|
+
PgsFile-0.4.5.dist-info/LICENSE,sha256=cE5c-QToSkG1KTUsU8drQXz1vG0EbJWuU4ybHTRb5SE,1138
|
|
2595
|
+
PgsFile-0.4.5.dist-info/METADATA,sha256=trHMmN-jQGE_NxXW35ZdVyOf7XC_Q9xT5v4vMkk48rw,2994
|
|
2596
|
+
PgsFile-0.4.5.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
|
|
2597
|
+
PgsFile-0.4.5.dist-info/top_level.txt,sha256=028hCfwhF3UpfD6X0rwtWpXI1RKSTeZ1ALwagWaSmX8,8
|
|
2598
|
+
PgsFile-0.4.5.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|
|
File without changes
|