PgsFile 0.5.3__py3-none-any.whl → 0.5.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of PgsFile might be problematic. Click here for more details.

PgsFile/PgsFile.py CHANGED
@@ -4322,3 +4322,41 @@ def append_result_only(prompts_dict, note=RESULT_ONLY_NOTE):
4322
4322
  # Apply it
4323
4323
  translation_prompts = append_result_only(raw_translation_prompts)
4324
4324
 
4325
+
4326
def csv_to_json_append(csv_path: str, json_path: str) -> None:
    """
    Convert a CSV file into a list of dictionaries and append them into a JSON file.

    Column names are detected automatically from the CSV header.

    Args:
        csv_path (str): Path to the CSV file.
        json_path (str): Path to the output JSON file.

    Raises:
        FileNotFoundError: If csv_path does not exist.
    """
    import pandas as pd

    # Load CSV into DataFrame
    df = pd.read_csv(csv_path)

    # Fix: pandas represents missing CSV cells as float NaN, which is not
    # valid JSON (serializers either emit the non-standard `NaN` token or
    # raise). Replace NaN with None so each record serializes as JSON null.
    df = df.astype(object).where(df.notna(), None)

    # Automatically get all columns, convert to list of dicts
    data_list = df.to_dict(orient='records')

    # Append each dict to JSON file via the module-level helper
    for record in data_list:
        append_dict_to_json(json_path, record)

    print(f"✅ Completed! Appended {len(data_list)} records to {json_path}")
4348
+
4349
def get_data_csv(csv_path: str) -> list[dict]:
    """
    Load a CSV file and return its rows as a list of dictionaries.
    Column names are automatically detected.

    Args:
        csv_path (str): Path to the CSV file.

    Returns:
        list[dict]: A list of dictionaries, where each dict represents one row.
    """
    import pandas as pd

    # Read the whole file, letting pandas infer the header and dtypes,
    # then map every row to {column_name: cell_value}.
    frame = pd.read_csv(csv_path)
    records = frame.to_dict(orient="records")
    return records
PgsFile/__init__.py CHANGED
@@ -11,7 +11,7 @@ from .PgsFile import conda_mirror_commands
11
11
 
12
12
  # 3. Text data retrieval
13
13
  from .PgsFile import get_data_text, get_data_lines, get_json_lines, get_tsv_lines
14
- from .PgsFile import get_data_excel, get_data_json, get_data_tsv, extract_misspelled_words_from_docx
14
+ from .PgsFile import get_data_excel, get_data_json, get_data_tsv, get_data_csv, extract_misspelled_words_from_docx
15
15
  from .PgsFile import get_data_html_online, get_data_html_offline
16
16
  from .PgsFile import get_data_table_url, get_data_table_html_string
17
17
  from .PgsFile import mhtml2html
@@ -33,6 +33,7 @@ from .PgsFile import set_permanent_environment_variable
33
33
  from .PgsFile import delete_permanent_environment_variable
34
34
  from .PgsFile import get_env_variable, get_all_env_variables
35
35
  from .PgsFile import get_system_info
36
+ from .PgsFile import csv_to_json_append
36
37
 
37
38
  # 6. Data cleaning
38
39
  from .PgsFile import BigPunctuation, StopTags, Special, yhd
@@ -0,0 +1,32 @@
1
+ You are an expert text classifier.
2
+ Your task is to classify a given Chinese news text into ONE and ONLY ONE of the following categories:
3
+
4
+ Categories:
5
+ - 港澳台 (Hong Kong, Macao, Taiwan)
6
+ - 房产 (Real Estate)
7
+ - 军事 (Military)
8
+ - 社会 (Society)
9
+ - 财经 (Finance & Economy)
10
+ - 文娱 (Culture & Entertainment)
11
+ - 汽车 (Automobile)
12
+ - 国际 (International News)
13
+ - 教育 (Education)
14
+ - 健康 (Health)
15
+ - 美食 (Food & Cuisine)
16
+ - 时政 (Current Affairs / Politics)
17
+ - 法治 (Law & Legal Affairs)
18
+ - 旅游 (Travel & Tourism)
19
+ - 体育 (Sports)
20
+ - 数码科技 (Digital Technology)
21
+ - 动漫 (Anime & Comics)
22
+ - 反腐前沿 (Anti-Corruption)
23
+ - 国内 (Domestic News within Mainland China)
24
+
25
+ Instructions:
26
+ 1. Read the input Chinese text carefully.
27
+ 2. Determine the most relevant SINGLE category from the list above.
28
+ 3. If multiple categories seem relevant, choose the one that best represents the MAIN topic of the text.
29
+ 4. Output ONLY the category name in Chinese (e.g., 财经, 体育), without explanation.
30
+
31
+ Text to classify:
32
+ {text}
@@ -1,7 +1,7 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: PgsFile
3
- Version: 0.5.3
4
- Summary: This module simplifies Python package management, script execution, file handling, web scraping, and multimedia downloads. The module supports (LLM-based) NLP tasks such as OCR, tokenization, lemmatization, POS tagging, NER, ATE, dependency parsing, MDD, WSD, LIWC, MIP analysis and Chinese-English sentence alignment. It also generates word lists, and plots data, aiding literary students. Ideal for scraping data, cleaning text, and analyzing language, it offers user-friendly tools to streamline workflows.
3
+ Version: 0.5.4
4
+ Summary: This module streamlines Python package management, script execution, file handling, web scraping, and multimedia downloads. It supports LLM-based NLP tasks like OCR, tokenization, lemmatization, POS tagging, NER, ATE, dependency parsing, MDD, WSD, LIWC, MIP analysis, text classification, and Chinese-English sentence alignment. Additionally, it generates word lists and data visualizations, making it a practical tool for data scraping and analysis—ideal for literary students and researchers.
5
5
  Author-email: Pan Guisheng <panguisheng@sufe.edu.cn>
6
6
  License: License :: Free For Educational Use
7
7
 
@@ -52,11 +52,11 @@ Purpose: This module is designed to make complex tasks accessible and convenient
52
52
  Key Features:
53
53
  1. **Web Scraping:** Easily scrape data from websites and download multimedia content.
54
54
  2. **Package Management:** Install, uninstall, and manage Python packages with simple commands.
55
- 3. **Data Retrieval:** Extract data from various file formats like text, JSON, TSV, Excel, XML, and HTML (both online and offline).
55
+ 3. **Data Retrieval:** Extract data from various file formats like text, JSON, CSV, TSV, XLSX, XML, and HTML (both online and offline).
56
56
  4. **Data Storage:** Write and append data to text files, Excel, JSON, TMX, and JSON lines.
57
- 5. **File and Folder Processing:** Manage file paths, create directories, move or copy files, and search for files with specific keywords.
57
+ 5. **File and Folder Processing:** Manage file paths, create directories, move or copy files, convert CSV to JSON, and search for files with specific keywords.
58
58
  6. **Data Cleaning:** Clean text, handle punctuation, remove stopwords, convert Markdown strings into Python objects, and prepare data for analysis, utilizing valuable corpora and dictionaries such as CET-4/6 vocabulary, BE21 and BNC-COCA word lists.
59
- 7. **NLP:** Perform OCR, word tokenization, lemmatization, POS tagging, NER, dependency parsing, ATE, MDD, WSD, LIWC, MIP analysis, and Chinese-English sentence alignment using prepared LLM prompts.
59
+ 7. **NLP:** Perform OCR, word tokenization, lemmatization, POS tagging, NER, dependency parsing, ATE, MDD, WSD, LIWC, MIP analysis, text classification, and Chinese-English sentence alignment using prepared LLM prompts.
60
60
  8. **Math Operations:** Format numbers, convert decimals to percentages, and validate data.
61
61
  9. **Visualization:** Process images (e.g., make white pixels transparent, resize images) and manage fonts for rendering text.
62
62
 
@@ -1,5 +1,5 @@
1
- PgsFile/PgsFile.py,sha256=5YZph6RctA_9n4xRvsXqBIVzF70Z0sB126o-dsMYqeU,177437
2
- PgsFile/__init__.py,sha256=C9Og4ITZ_AGJ4APkmzkYvv75v0mePBppw984skOZSq0,3746
1
+ PgsFile/PgsFile.py,sha256=BUJJZFngPXiRQ8NQ7k11DdE6trxJrdbLI8mJU5-MFq4,178575
2
+ PgsFile/__init__.py,sha256=g37kKJxGqD61TQCWg6tDhUr27u-XRuMTIYNUjzRLPQ8,3801
3
3
  PgsFile/Corpora/Idioms/English_Idioms_8774.txt,sha256=qlsP0yI_XGECBRiPZuLkGZpdasc77sWSKexANu7v8_M,175905
4
4
  PgsFile/Corpora/Monolingual/Chinese/People's Daily 20130605/Raw/00000000.txt,sha256=SLGGSMSb7Ff1RoBstsTW3yX2wNZpqEUchFNpcI-mrR4,1513
5
5
  PgsFile/Corpora/Monolingual/Chinese/People's Daily 20130605/Raw/00000001.txt,sha256=imOa6UoCOIZoPXT4_HNHgCUJtd4FTIdk2FZNHNBgJyg,3372
@@ -2587,14 +2587,15 @@ PgsFile/models/fonts/博洋行书3500.TTF,sha256=VrgeHr8cgOL6JD05QyuD9ZSyw4J2aIV
2587
2587
  PgsFile/models/fonts/陆柬之行书字体.ttf,sha256=Zpd4Z7E9w-Qy74yklXHk4vM7HOtHuQgllvygxZZ1Hvs,1247288
2588
2588
  PgsFile/models/prompts/1. MIP prompt.txt,sha256=4lHlHmleayRytqr1n9jtt6vn1rQvyf4BKeThpbwI8o8,1638
2589
2589
  PgsFile/models/prompts/2. WSD prompt.txt,sha256=o-ZFtCRUCDrXgm040WTQch9v2Y_r2SIlrZaquilJjgQ,2348
2590
- PgsFile/models/prompts/3. ICTCLAS Prompt.txt,sha256=VFn6N_JViAbyy9NazA8gjX6SGo5mgBcZOf95aC9JB84,592
2590
+ PgsFile/models/prompts/3. ICTCLAS prompt.txt,sha256=VFn6N_JViAbyy9NazA8gjX6SGo5mgBcZOf95aC9JB84,592
2591
2591
  PgsFile/models/prompts/4. OCR prompt.txt,sha256=YxUQ2IlE52k0fcBnGsuOHqWAmfiEmIu6iRz5zecQ8dk,260
2592
2592
  PgsFile/models/prompts/5. ATE prompt.txt,sha256=5wu0gGlsV7DI0LruYM3-uAC6brppyYD0IoiFVjMqm5Y,1553
2593
2593
  PgsFile/models/prompts/6. ATE3 prompt.txt,sha256=VnaXpPa6BgZHUcm8PxmP_qgU-8xEoTB3XcBqjwCUy_g,1254
2594
2594
  PgsFile/models/prompts/7. SentAlign prompt.txt,sha256=hXpqqC-CAgo8EytkJ0MaLhevLefALazWriY-ew39jxs,1537
2595
2595
  PgsFile/models/prompts/8. TitleCase prompt.txt,sha256=4p-LfGy0xAj2uPi9amyMm41T6Z17VNpFFsGZOgWhROs,1136
2596
- pgsfile-0.5.3.dist-info/licenses/LICENSE,sha256=cE5c-QToSkG1KTUsU8drQXz1vG0EbJWuU4ybHTRb5SE,1138
2597
- pgsfile-0.5.3.dist-info/METADATA,sha256=qGxKrMr7r_TWpeWb-slCs7bX95jhwykg5i2nWO_h6tk,4522
2598
- pgsfile-0.5.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
2599
- pgsfile-0.5.3.dist-info/top_level.txt,sha256=028hCfwhF3UpfD6X0rwtWpXI1RKSTeZ1ALwagWaSmX8,8
2600
- pgsfile-0.5.3.dist-info/RECORD,,
2596
+ PgsFile/models/prompts/9. TextClassification prompt.txt,sha256=JhQJu3rQSstNtkIkxPR1K-QmH9sGBEhbVKHAi7ItMUA,1066
2597
+ pgsfile-0.5.4.dist-info/licenses/LICENSE,sha256=cE5c-QToSkG1KTUsU8drQXz1vG0EbJWuU4ybHTRb5SE,1138
2598
+ pgsfile-0.5.4.dist-info/METADATA,sha256=KtGtZ5Q-o3muHqz5OsKp3ZUQz-qwncC-l65-udjesz8,4555
2599
+ pgsfile-0.5.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
2600
+ pgsfile-0.5.4.dist-info/top_level.txt,sha256=028hCfwhF3UpfD6X0rwtWpXI1RKSTeZ1ALwagWaSmX8,8
2601
+ pgsfile-0.5.4.dist-info/RECORD,,