PgsFile 0.5.2__py3-none-any.whl → 0.5.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of PgsFile might be problematic. Click here for more details.
- PgsFile/PgsFile.py +79 -0
- PgsFile/__init__.py +3 -1
- PgsFile/models/prompts/9. TextClassification prompt.txt +32 -0
- pgsfile-0.5.4.dist-info/METADATA +65 -0
- {PgsFile-0.5.2.dist-info → pgsfile-0.5.4.dist-info}/RECORD +9 -8
- {PgsFile-0.5.2.dist-info → pgsfile-0.5.4.dist-info}/WHEEL +1 -1
- PgsFile-0.5.2.dist-info/METADATA +0 -43
- /PgsFile/models/prompts/{3. ICTCLAS Prompt.txt → 3. ICTCLAS prompt.txt} +0 -0
- {PgsFile-0.5.2.dist-info → pgsfile-0.5.4.dist-info/licenses}/LICENSE +0 -0
- {PgsFile-0.5.2.dist-info → pgsfile-0.5.4.dist-info}/top_level.txt +0 -0
PgsFile/PgsFile.py
CHANGED
|
@@ -4281,3 +4281,82 @@ def maketmx(
|
|
|
4281
4281
|
with open(tmx_path, 'w', encoding='utf-8') as f:
|
|
4282
4282
|
doc.writexml(f, indent='\t', newl='\n', addindent='\t')
|
|
4283
4283
|
|
|
4284
|
+
# Prompt templates for Chinese-English financial translation tasks.
# Each template is a format string with a {text} placeholder (the
# "glossary" prompt additionally expects {glossary} and {urls}).
# Grouped by scenario; assembled into one registry below.
_general_prompts = {
    "basic": "Act as a professional Chinese–English translator specializing in finance and economics. Translate the following text: {text}",
    "formal_report": "Translate the following Chinese text into English, targeting an international audience with a formal tone suitable for a financial report: {text}",
    "social_media": "Translate the following Chinese text into English, keeping a friendly and approachable tone suitable for social media content about finance: {text}",
}

_technical_prompts = {
    "regulation": "Translate the following Chinese financial regulation into English, using terminology consistent with the People’s Bank of China (PBC) and international financial standards. Maintain a formal tone: {text}",
    "contract": "Translate the following Chinese financial contract into English, ensuring consistency with financial and legal terminology, particularly in capital markets: {text}",
    "glossary": "Translate the following Chinese text into English using the attached glossary and reference websites: {glossary}, {urls}. Text: {text}"
}

_review_prompts = {
    "proofread": "Proofread the following Chinese-to-English machine translation. Correct grammar, syntax, and financial terminology errors, and improve readability while maintaining the original meaning: {text}",
    "localize": "Review the following Chinese-to-English translation as a native English speaker. Adjust any phrases to ensure they sound natural and localized for an international finance audience: {text}",
    "back_translate": "Back-translate the following English text into Chinese as literally as possible, without interpreting meaning: {text}"
}

_creative_prompts = {
    "idiom": "How would an English speaker naturally express the Chinese economic idiom: {text}?",
    "slogan": "Transcreate the following Chinese financial advertising slogan into English. Maintain its persuasive impact and cultural relevance while keeping it concise and memorable. Provide two versions and explain your choices: {text}"
}

# Raw (un-suffixed) prompt registry, keyed by scenario group.
raw_translation_prompts = {
    "general": _general_prompts,
    "technical": _technical_prompts,
    "review": _review_prompts,
    "creative": _creative_prompts,
}
|
|
4305
|
+
|
|
4306
|
+
# Suffix appended to every prompt so the LLM replies with the bare result.
RESULT_ONLY_NOTE = " Only return the translation result without any further explanations."

def append_result_only(prompts_dict, note=RESULT_ONLY_NOTE):
    """
    Return a copy of *prompts_dict* with *note* appended to every prompt
    string, recursing into nested dicts. Non-string, non-dict values are
    carried over unchanged; the input dict is never mutated.
    """
    def _with_note(value):
        # Recurse into sub-dicts; suffix strings; pass anything else through.
        if isinstance(value, dict):
            return {inner_key: _with_note(inner_val)
                    for inner_key, inner_val in value.items()}
        if isinstance(value, str):
            return value.strip() + note
        return value

    return {key: _with_note(value) for key, value in prompts_dict.items()}
|
|
4321
|
+
|
|
4322
|
+
# Public prompt registry: every template carries the result-only suffix.
translation_prompts = append_result_only(raw_translation_prompts)
|
|
4324
|
+
|
|
4325
|
+
|
|
4326
|
+
def csv_to_json_append(csv_path: str, json_path: str) -> None:
    """
    Convert a CSV file into a list of dictionaries and append them into a JSON file.

    Columns are detected automatically; each CSV row becomes one dict that
    is appended to *json_path* via `append_dict_to_json`.

    Args:
        csv_path (str): Path to the CSV file.
        json_path (str): Path to the output JSON file.
    """
    # Imported lazily so pandas is only required when this helper is used.
    import pandas as pd

    # One dict per CSV row, keyed by the detected column names.
    records = pd.read_csv(csv_path).to_dict(orient='records')

    # Append row by row through the shared JSON helper.
    for record in records:
        append_dict_to_json(json_path, record)

    print(f"✅ Completed! Appended {len(records)} records to {json_path}")
|
|
4348
|
+
|
|
4349
|
+
def get_data_csv(csv_path: str) -> list[dict]:
    """
    Load a CSV file and return its rows as a list of dictionaries.
    Column names are automatically detected.

    Args:
        csv_path (str): Path to the CSV file.

    Returns:
        list[dict]: A list of dictionaries, where each dict represents one row.
    """
    # Imported lazily so pandas is only required when this helper is used.
    import pandas as pd

    return pd.read_csv(csv_path).to_dict(orient="records")
|
PgsFile/__init__.py
CHANGED
|
@@ -11,7 +11,7 @@ from .PgsFile import conda_mirror_commands
|
|
|
11
11
|
|
|
12
12
|
# 3. Text data retrieval
|
|
13
13
|
from .PgsFile import get_data_text, get_data_lines, get_json_lines, get_tsv_lines
|
|
14
|
-
from .PgsFile import get_data_excel, get_data_json, get_data_tsv, extract_misspelled_words_from_docx
|
|
14
|
+
from .PgsFile import get_data_excel, get_data_json, get_data_tsv, get_data_csv, extract_misspelled_words_from_docx
|
|
15
15
|
from .PgsFile import get_data_html_online, get_data_html_offline
|
|
16
16
|
from .PgsFile import get_data_table_url, get_data_table_html_string
|
|
17
17
|
from .PgsFile import mhtml2html
|
|
@@ -33,6 +33,7 @@ from .PgsFile import set_permanent_environment_variable
|
|
|
33
33
|
from .PgsFile import delete_permanent_environment_variable
|
|
34
34
|
from .PgsFile import get_env_variable, get_all_env_variables
|
|
35
35
|
from .PgsFile import get_system_info
|
|
36
|
+
from .PgsFile import csv_to_json_append
|
|
36
37
|
|
|
37
38
|
# 6. Data cleaning
|
|
38
39
|
from .PgsFile import BigPunctuation, StopTags, Special, yhd
|
|
@@ -55,6 +56,7 @@ from .PgsFile import extract_noun_phrases, get_LLMs_prompt, extract_keywords_en,
|
|
|
55
56
|
from .PgsFile import extract_dependency_relations, extract_dependency_relations_full
|
|
56
57
|
from .PgsFile import predict_category
|
|
57
58
|
from .PgsFile import tfidf_keyword_extraction
|
|
59
|
+
from .PgsFile import translation_prompts
|
|
58
60
|
|
|
59
61
|
# 8. Maths
|
|
60
62
|
from .PgsFile import len_rows, check_empty_cells
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
You are an expert text classifier.
|
|
2
|
+
Your task is to classify a given Chinese news text into ONE and ONLY ONE of the following categories:
|
|
3
|
+
|
|
4
|
+
Categories:
|
|
5
|
+
- 港澳台 (Hong Kong, Macao, Taiwan)
|
|
6
|
+
- 房产 (Real Estate)
|
|
7
|
+
- 军事 (Military)
|
|
8
|
+
- 社会 (Society)
|
|
9
|
+
- 财经 (Finance & Economy)
|
|
10
|
+
- 文娱 (Culture & Entertainment)
|
|
11
|
+
- 汽车 (Automobile)
|
|
12
|
+
- 国际 (International News)
|
|
13
|
+
- 教育 (Education)
|
|
14
|
+
- 健康 (Health)
|
|
15
|
+
- 美食 (Food & Cuisine)
|
|
16
|
+
- 时政 (Current Affairs / Politics)
|
|
17
|
+
- 法治 (Law & Legal Affairs)
|
|
18
|
+
- 旅游 (Travel & Tourism)
|
|
19
|
+
- 体育 (Sports)
|
|
20
|
+
- 数码科技 (Digital Technology)
|
|
21
|
+
- 动漫 (Anime & Comics)
|
|
22
|
+
- 反腐前沿 (Anti-Corruption)
|
|
23
|
+
- 国内 (Domestic News within Mainland China)
|
|
24
|
+
|
|
25
|
+
Instructions:
|
|
26
|
+
1. Read the input Chinese text carefully.
|
|
27
|
+
2. Determine the most relevant SINGLE category from the list above.
|
|
28
|
+
3. If multiple categories seem relevant, choose the one that best represents the MAIN topic of the text.
|
|
29
|
+
4. Output ONLY the category name in Chinese (e.g., 财经, 体育), without explanation.
|
|
30
|
+
|
|
31
|
+
Text to classify:
|
|
32
|
+
{text}
|
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: PgsFile
|
|
3
|
+
Version: 0.5.4
|
|
4
|
+
Summary: This module streamlines Python package management, script execution, file handling, web scraping, and multimedia downloads. It supports LLM-based NLP tasks like OCR, tokenization, lemmatization, POS tagging, NER, ATE, dependency parsing, MDD, WSD, LIWC, MIP analysis, text classification, and Chinese-English sentence alignment. Additionally, it generates word lists and data visualizations, making it a practical tool for data scraping and analysis—ideal for literary students and researchers.
|
|
5
|
+
Author-email: Pan Guisheng <panguisheng@sufe.edu.cn>
|
|
6
|
+
License: License :: Free For Educational Use
|
|
7
|
+
|
|
8
|
+
Copyright (c) 2021-present, GIIT, Shanghai International Studies University.
|
|
9
|
+
|
|
10
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
11
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
12
|
+
in the Software without restriction, including without limitation the rights
|
|
13
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
14
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
15
|
+
furnished to do so, subject to the following conditions:
|
|
16
|
+
|
|
17
|
+
The above copyright notice and this permission notice shall be included in all
|
|
18
|
+
copies or substantial portions of the Software.
|
|
19
|
+
|
|
20
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
21
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
22
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
23
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
24
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
25
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
26
|
+
SOFTWARE.
|
|
27
|
+
|
|
28
|
+
Project-URL: Homepage, https://github.com/Petercusin/PgsFile
|
|
29
|
+
Project-URL: Repository, https://github.com/Petercusin/PgsFile
|
|
30
|
+
Classifier: Programming Language :: Python :: 3
|
|
31
|
+
Classifier: License :: Free For Educational Use
|
|
32
|
+
Classifier: Operating System :: OS Independent
|
|
33
|
+
Requires-Python: >=3.8
|
|
34
|
+
Description-Content-Type: text/markdown
|
|
35
|
+
License-File: LICENSE
|
|
36
|
+
Requires-Dist: chardet
|
|
37
|
+
Requires-Dist: pandas
|
|
38
|
+
Requires-Dist: python-docx
|
|
39
|
+
Requires-Dist: pip
|
|
40
|
+
Requires-Dist: requests
|
|
41
|
+
Requires-Dist: fake-useragent
|
|
42
|
+
Requires-Dist: lxml
|
|
43
|
+
Requires-Dist: pimht
|
|
44
|
+
Requires-Dist: pysbd
|
|
45
|
+
Requires-Dist: nlpir-python
|
|
46
|
+
Requires-Dist: pillow
|
|
47
|
+
Requires-Dist: liwc
|
|
48
|
+
Dynamic: license-file
|
|
49
|
+
|
|
50
|
+
Purpose: This module is designed to make complex tasks accessible and convenient, even for beginners. By providing a unified set of tools, it simplifies the workflow for data collection, processing, and analysis. Whether you're scraping data from the web, cleaning text, or performing LLM-based NLP tasks, this module ensures you can focus on your research without getting bogged down by technical challenges.
|
|
51
|
+
|
|
52
|
+
Key Features:
|
|
53
|
+
1. **Web Scraping:** Easily scrape data from websites and download multimedia content.
|
|
54
|
+
2. **Package Management:** Install, uninstall, and manage Python packages with simple commands.
|
|
55
|
+
3. **Data Retrieval:** Extract data from various file formats like text, JSON, CSV, TSV, XLSX, XML, and HTML (both online and offline).
|
|
56
|
+
4. **Data Storage:** Write and append data to text files, Excel, JSON, TMX, and JSON lines.
|
|
57
|
+
5. **File and Folder Processing:** Manage file paths, create directories, move or copy files, convert CSV to JSON, and search for files with specific keywords.
|
|
58
|
+
6. **Data Cleaning:** Clean text, handle punctuation, remove stopwords, convert Markdown strings into Python objects, and prepare data for analysis, utilizing valuable corpora and dictionaries such as CET-4/6 vocabulary, BE21 and BNC-COCA word lists.
|
|
59
|
+
7. **NLP:** Perform OCR, word tokenization, lemmatization, POS tagging, NER, dependency parsing, ATE, MDD, WSD, LIWC, MIP analysis, text classification, and Chinese-English sentence alignment using prepared LLM prompts.
|
|
60
|
+
8. **Math Operations:** Format numbers, convert decimals to percentages, and validate data.
|
|
61
|
+
9. **Visualization:** Process images (e.g., make white pixels transparent, resize images) and manage fonts for rendering text.
|
|
62
|
+
|
|
63
|
+
Author: Pan Guisheng, a PhD student at the Graduate Institute of Interpretation and Translation of Shanghai International Studies University
|
|
64
|
+
Email: 895284504@qq.com
|
|
65
|
+
Homepage: https://mp.weixin.qq.com/s/lWMkYDWQMjBJNKY2vMYTpw
|
|
@@ -1,5 +1,5 @@
|
|
|
1
|
-
PgsFile/PgsFile.py,sha256=
|
|
2
|
-
PgsFile/__init__.py,sha256=
|
|
1
|
+
PgsFile/PgsFile.py,sha256=BUJJZFngPXiRQ8NQ7k11DdE6trxJrdbLI8mJU5-MFq4,178575
|
|
2
|
+
PgsFile/__init__.py,sha256=g37kKJxGqD61TQCWg6tDhUr27u-XRuMTIYNUjzRLPQ8,3801
|
|
3
3
|
PgsFile/Corpora/Idioms/English_Idioms_8774.txt,sha256=qlsP0yI_XGECBRiPZuLkGZpdasc77sWSKexANu7v8_M,175905
|
|
4
4
|
PgsFile/Corpora/Monolingual/Chinese/People's Daily 20130605/Raw/00000000.txt,sha256=SLGGSMSb7Ff1RoBstsTW3yX2wNZpqEUchFNpcI-mrR4,1513
|
|
5
5
|
PgsFile/Corpora/Monolingual/Chinese/People's Daily 20130605/Raw/00000001.txt,sha256=imOa6UoCOIZoPXT4_HNHgCUJtd4FTIdk2FZNHNBgJyg,3372
|
|
@@ -2587,14 +2587,15 @@ PgsFile/models/fonts/博洋行书3500.TTF,sha256=VrgeHr8cgOL6JD05QyuD9ZSyw4J2aIV
|
|
|
2587
2587
|
PgsFile/models/fonts/陆柬之行书字体.ttf,sha256=Zpd4Z7E9w-Qy74yklXHk4vM7HOtHuQgllvygxZZ1Hvs,1247288
|
|
2588
2588
|
PgsFile/models/prompts/1. MIP prompt.txt,sha256=4lHlHmleayRytqr1n9jtt6vn1rQvyf4BKeThpbwI8o8,1638
|
|
2589
2589
|
PgsFile/models/prompts/2. WSD prompt.txt,sha256=o-ZFtCRUCDrXgm040WTQch9v2Y_r2SIlrZaquilJjgQ,2348
|
|
2590
|
-
PgsFile/models/prompts/3. ICTCLAS
|
|
2590
|
+
PgsFile/models/prompts/3. ICTCLAS prompt.txt,sha256=VFn6N_JViAbyy9NazA8gjX6SGo5mgBcZOf95aC9JB84,592
|
|
2591
2591
|
PgsFile/models/prompts/4. OCR prompt.txt,sha256=YxUQ2IlE52k0fcBnGsuOHqWAmfiEmIu6iRz5zecQ8dk,260
|
|
2592
2592
|
PgsFile/models/prompts/5. ATE prompt.txt,sha256=5wu0gGlsV7DI0LruYM3-uAC6brppyYD0IoiFVjMqm5Y,1553
|
|
2593
2593
|
PgsFile/models/prompts/6. ATE3 prompt.txt,sha256=VnaXpPa6BgZHUcm8PxmP_qgU-8xEoTB3XcBqjwCUy_g,1254
|
|
2594
2594
|
PgsFile/models/prompts/7. SentAlign prompt.txt,sha256=hXpqqC-CAgo8EytkJ0MaLhevLefALazWriY-ew39jxs,1537
|
|
2595
2595
|
PgsFile/models/prompts/8. TitleCase prompt.txt,sha256=4p-LfGy0xAj2uPi9amyMm41T6Z17VNpFFsGZOgWhROs,1136
|
|
2596
|
-
PgsFile
|
|
2597
|
-
|
|
2598
|
-
|
|
2599
|
-
|
|
2600
|
-
|
|
2596
|
+
PgsFile/models/prompts/9. TextClassification prompt.txt,sha256=JhQJu3rQSstNtkIkxPR1K-QmH9sGBEhbVKHAi7ItMUA,1066
|
|
2597
|
+
pgsfile-0.5.4.dist-info/licenses/LICENSE,sha256=cE5c-QToSkG1KTUsU8drQXz1vG0EbJWuU4ybHTRb5SE,1138
|
|
2598
|
+
pgsfile-0.5.4.dist-info/METADATA,sha256=KtGtZ5Q-o3muHqz5OsKp3ZUQz-qwncC-l65-udjesz8,4555
|
|
2599
|
+
pgsfile-0.5.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
|
2600
|
+
pgsfile-0.5.4.dist-info/top_level.txt,sha256=028hCfwhF3UpfD6X0rwtWpXI1RKSTeZ1ALwagWaSmX8,8
|
|
2601
|
+
pgsfile-0.5.4.dist-info/RECORD,,
|
PgsFile-0.5.2.dist-info/METADATA
DELETED
|
@@ -1,43 +0,0 @@
|
|
|
1
|
-
Metadata-Version: 2.1
|
|
2
|
-
Name: PgsFile
|
|
3
|
-
Version: 0.5.2
|
|
4
|
-
Summary: This module simplifies Python package management, script execution, file handling, web scraping, and multimedia downloads. The module supports (LLM-based) NLP tasks such as OCR, tokenization, lemmatization, POS tagging, NER, ATE, dependency parsing, MDD, WSD, LIWC, MIP analysis and Chinese-English sentence alignment. It also generates word lists, and plots data, aiding literary students. Ideal for scraping data, cleaning text, and analyzing language, it offers user-friendly tools to streamline workflows.
|
|
5
|
-
Home-page: https://github.com/Petercusin/PgsFile
|
|
6
|
-
Author: Pan Guisheng
|
|
7
|
-
Author-email: panguisheng@sufe.edu.cn
|
|
8
|
-
License: Educational free
|
|
9
|
-
Classifier: Programming Language :: Python :: 3
|
|
10
|
-
Classifier: License :: Free For Educational Use
|
|
11
|
-
Classifier: Operating System :: OS Independent
|
|
12
|
-
Requires-Python: >=3.8
|
|
13
|
-
Description-Content-Type: text/markdown
|
|
14
|
-
License-File: LICENSE
|
|
15
|
-
Requires-Dist: chardet
|
|
16
|
-
Requires-Dist: pandas
|
|
17
|
-
Requires-Dist: python-docx
|
|
18
|
-
Requires-Dist: pip
|
|
19
|
-
Requires-Dist: requests
|
|
20
|
-
Requires-Dist: fake-useragent
|
|
21
|
-
Requires-Dist: lxml
|
|
22
|
-
Requires-Dist: pimht
|
|
23
|
-
Requires-Dist: pysbd
|
|
24
|
-
Requires-Dist: nlpir-python
|
|
25
|
-
Requires-Dist: pillow
|
|
26
|
-
Requires-Dist: liwc
|
|
27
|
-
|
|
28
|
-
Purpose: This module is designed to make complex tasks accessible and convenient, even for beginners. By providing a unified set of tools, it simplifies the workflow for data collection, processing, and analysis. Whether you're scraping data from the web, cleaning text, or performing LLM-based NLP tasks, this module ensures you can focus on your research without getting bogged down by technical challenges.
|
|
29
|
-
|
|
30
|
-
Key Features:
|
|
31
|
-
1. **Web Scraping:** Easily scrape data from websites and download multimedia content.
|
|
32
|
-
2. **Package Management:** Install, uninstall, and manage Python packages with simple commands.
|
|
33
|
-
3. **Data Retrieval:** Extract data from various file formats like text, JSON, TSV, Excel, XML, and HTML (both online and offline).
|
|
34
|
-
4. **Data Storage:** Write and append data to text files, Excel, JSON, TMX, and JSON lines.
|
|
35
|
-
5. **File and Folder Processing:** Manage file paths, create directories, move or copy files, and search for files with specific keywords.
|
|
36
|
-
6. **Data Cleaning:** Clean text, handle punctuation, remove stopwords, convert Markdown strings into Python objects, and prepare data for analysis, utilizing valuable corpora and dictionaries such as CET-4/6 vocabulary, BE21 and BNC-COCA word lists.
|
|
37
|
-
7. **NLP:** Perform OCR, word tokenization, lemmatization, POS tagging, NER, dependency parsing, ATE, MDD, WSD, LIWC, MIP analysis, and Chinese-English sentence alignment using prepared LLM prompts.
|
|
38
|
-
8. **Math Operations:** Format numbers, convert decimals to percentages, and validate data.
|
|
39
|
-
9. **Visualization:** Process images (e.g., make white pixels transparent, resize images) and manage fonts for rendering text.
|
|
40
|
-
|
|
41
|
-
Author: Pan Guisheng, a PhD student at the Graduate Institute of Interpretation and Translation of Shanghai International Studies University
|
|
42
|
-
Email: 895284504@qq.com
|
|
43
|
-
Homepage: https://mp.weixin.qq.com/s/lWMkYDWQMjBJNKY2vMYTpw
|
|
File without changes
|
|
File without changes
|
|
File without changes
|