PgsFile 0.3.6-py3-none-any.whl → 0.3.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of PgsFile might be problematic.
- PgsFile/PgsFile.py +129 -0
- PgsFile/__init__.py +2 -2
- {PgsFile-0.3.6.dist-info → PgsFile-0.3.8.dist-info}/METADATA +4 -3
- {PgsFile-0.3.6.dist-info → PgsFile-0.3.8.dist-info}/RECORD +7 -7
- {PgsFile-0.3.6.dist-info → PgsFile-0.3.8.dist-info}/LICENSE +0 -0
- {PgsFile-0.3.6.dist-info → PgsFile-0.3.8.dist-info}/WHEEL +0 -0
- {PgsFile-0.3.6.dist-info → PgsFile-0.3.8.dist-info}/top_level.txt +0 -0
PgsFile/PgsFile.py
CHANGED
@@ -3667,3 +3667,132 @@ def timeit(func):
         return result
     return wrapper

+def file_to_list_of_dicts(input_path, output_path):
+    """
+    Convert a CSV or XLSX file into a JSON file, where each line is a dictionary representing one row of the input file.
+    The keys of each dictionary are formatted as "col1", "col2", "col3", etc.
+
+    Args:
+        input_path (str): Path to the CSV or XLSX file.
+        output_path (str): Path to the output JSON file.
+
+    Returns:
+        None: The function writes the dictionaries to the JSON file specified by output_path.
+    """
+    # Determine the file type based on the file extension
+    import pandas as pd
+    if input_path.endswith('.csv'):
+        df = pd.read_csv(input_path)
+    elif input_path.endswith('.xlsx'):
+        df = pd.read_excel(input_path)
+    else:
+        raise ValueError("Unsupported file format. Please provide a CSV or XLSX file.")
+
+    # Use default keys "col1", "col2", etc.
+    total = len(list(df.iterrows()))
+    for index, row in df.iterrows():
+        row_dict = {f"col{i+1}": value for i, value in enumerate(row)}
+        append_dict_to_json(output_path, row_dict)
+        print(f'Converting {decimal_to_percent(index/total)}')
+
+    print("Conversion complete!")
+
+
+import liwc
+import json
+
+def perform_liwc_en(dic_path, file_path, output_excel_path):
+    '''
+    Parameters
+    ----------
+    dic_path : str
+        Path to the LIWC dictionary file.
+    file_path : str
+        Path to the raw text file.
+    output_excel_path : str
+        Path to the output Excel file.
+    '''
+    parse, category_names = liwc.load_token_parser(dic_path)
+    test = get_data_text(file_path)
+    test_tokens = [w.lower() for w in word_tokenize2(test)]
+    my_word_list = dict(word_list(test_tokens))
+    corpus_scale = sum(my_word_list.values())
+
+    labels = {'function': '1', 'pronoun': '2', 'ppron': '3', 'i': '4', 'we': '5', 'you': '6', 'shehe': '7', 'they': '8', 'ipron': '9', 'article': '10', 'prep': '11', 'auxverb': '12', 'adverb': '13', 'conj': '14', 'negate': '15', 'verb': '20', 'adj': '21', 'compare': '22', 'interrog': '23', 'number': '24', 'quant': '25', 'affect': '30', 'posemo': '31', 'negemo': '32', 'anx': '33', 'anger': '34', 'sad': '35', 'social': '40', 'family': '41', 'friend': '42', 'female': '43', 'male': '44', 'cogproc': '50', 'insight': '51', 'cause': '52', 'discrep': '53', 'tentat': '54', 'certain': '55', 'differ': '56', 'percept': '60', 'see': '61', 'hear': '62', 'feel': '63', 'bio': '70', 'body': '71', 'health': '72', 'sexual': '73', 'ingest': '74', 'drives': '80', 'affiliation': '81', 'achiev': '82', 'power': '83', 'reward': '84', 'risk': '85', 'focuspast': '90', 'focuspresent': '91', 'focusfuture': '92', 'relativ': '100', 'motion': '101', 'space': '102', 'time': '103', 'work': '110', 'leisure': '111', 'home': '112', 'money': '113', 'relig': '114', 'death': '115', 'informal': '120', 'swear': '121', 'netspeak': '122', 'assent': '123', 'nonflu': '124', 'filler': '125', 'punctuation': '126'}
+
+    # Output columns:
+    # Column 1: category label
+    # Column 2: number of word types observed in the category
+    # Column 3: number of word tokens observed in the category
+    # Column 4: total token count of the examined corpus
+    # Column 5: coverage rate
+    # Column 6: example words as [word, frequency] pairs, sorted by descending frequency
+
+    def get_category_info(x):
+        category_words = []
+        category_words_freq = 0
+        for w in my_word_list:
+            categories = parse(w)
+            if x in categories:
+                category_words.append([w, my_word_list[w]])
+                category_words_freq += my_word_list[w]
+
+        final = sorted(category_words, key=lambda pair: pair[1], reverse=True)
+        json_string = json.dumps(final)
+        return [len(category_words), category_words_freq, corpus_scale, decimal_to_percent(category_words_freq / corpus_scale), json_string]
+
+    data = []
+    labels_list = list(labels.keys())
+    for i in labels_list:
+        rs = get_category_info(i)
+        data.append([i] + rs)
+
+    import pandas as pd
+    df = pd.DataFrame(data, columns=[u'类别', u'出现词种数', u'出现词次', u'总词次', u'覆盖率', u'例词'])
+    df.to_excel(output_excel_path, 'sheet1', index=False)
+
+
+def perform_liwc_zh(dic_path, file_path, output_excel_path):
+    '''
+    Parameters
+    ----------
+    dic_path : str
+        Path to the LIWC dictionary JSON file.
+    file_path : str
+        Path to the raw text file.
+    output_excel_path : str
+        Path to the output Excel file.
+    '''
+    f = open(dic_path, "r")
+    dicx = json.load(f)
+
+    test = get_data_text(file_path)
+    test_tokens = word_tokenize(test)
+    my_word_list = dict(word_list(test_tokens))
+    corpus_scale = sum(my_word_list.values())
+
+    labels = ['Entry:词条','function:功能词','pronoun:代名词','ppron:特定人称代名词','i:第一人称单数代名词','we:第一人称复数代名词','you:第二人称代名词','shehe:第三人称单数代名词','they:第三人称复数代名词','youpl:第二人称复数代名词','ipron:非特定人称代名词','prep:介系词','auxverb:助动词','adverb:副词','conj:连接词','negate:否定词','quanunit:量词','prepend:后置词','specart:特指定词','particle:小品词','modal_pa:语气词','general_pa','compare:比较词','interrog:疑问词','number:数字','quant:概数词','affect:情感历程词','posemo:正向情绪词','negemo:负向情绪词','anx:焦虑词','anger:生气词','sad:悲伤词','social:社会历程词','family:家族词','friend:朋友词','female:女性词','male:男性词','cogproc','insight:洞察词','cause:因果词','discrep:差距词','tentat:暂定词','certain:确切词','differ','percept:感知历程词','see:视觉词','hear:听觉词','feel:感觉词','bio:生理历程词','body:身体词','health:健康词','sexual:性词','ingest:摄食词','drives','affiliation','achieve:成就词','power','reward','risk','tensem:时态标定词','focuspast:过去时态标定词','focuspresent:现在时态标定词','focusfuture:未来时态标定词','progm:延续时态标定词','relativ:相对词','motion:移动词','space:空间词','time:时间词','work:工作词','leisure:休闲词','home:家庭词','money:金钱词','relig:宗教词','death:死亡词','informal','swear:脏话','netspeak:网络用语','assent:应和词','nonflu:停顿赘词','filler:填充赘词','punctuation:标点符号']
+    # target_index=labels.index("swear")  # 75
+
+    def get_category_info(x):
+        category_words = []
+        category_words_indicx = []
+        category_words_freq = 0
+        for w in dicx:
+            if dicx[w][x] == "1":
+                category_words_indicx.append(w)
+                if w in my_word_list:
+                    category_words_freq += my_word_list[w]
+                    category_words.append([w, my_word_list[w]])
+        final = sorted(category_words, key=lambda pair: pair[1], reverse=True)
+        json_string = json.dumps(final, ensure_ascii=False)
+        return [len(category_words), decimal_to_percent(len(category_words) / len(category_words_indicx)), category_words_freq, corpus_scale, decimal_to_percent(category_words_freq / corpus_scale), json_string]
+
+    data = []
+    # Skip the leading 'Entry:词条' header; the remaining labels align positionally with each word's flag list in dicx
+    for i in labels[1::]:
+        rs = get_category_info(labels[1::].index(i))
+        data.append([i] + rs)
+
+    import pandas as pd
+    df = pd.DataFrame(data, columns=[u'类别', u'出现词种数', u'占词表百分比', u'出现词次', u'总词次', u'覆盖率', u'例词'])
+    df.to_excel(output_excel_path, 'sheet1', index=False)
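Taken together, the hunk adds one data-conversion helper and two LIWC scoring front-ends. A minimal usage sketch follows; the input files and dictionary paths named here are hypothetical, only the function names come from the diff:

import PgsFile

# Spreadsheet -> JSON lines of row dictionaries keyed col1, col2, ...
PgsFile.file_to_list_of_dicts("survey.xlsx", "survey_rows.json")

# English text against a LIWC .dic file (loaded via liwc.load_token_parser)
PgsFile.perform_liwc_en("LIWC2015.dic", "essay.txt", "essay_liwc.xlsx")

# Chinese text against a JSON dictionary of per-word "0"/"1" category flags
PgsFile.perform_liwc_zh("liwc_zh.json", "article.txt", "article_liwc.xlsx")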
PgsFile/__init__.py
CHANGED
@@ -17,7 +17,7 @@ from .PgsFile import get_data_table_url, get_data_table_html_string
 from .PgsFile import mhtml2html

 # 4. Text data storage
-from .PgsFile import write_to_txt, write_to_excel, write_to_json, write_to_json_lines, append_dict_to_json, save_dict_to_excel
+from .PgsFile import write_to_txt, write_to_excel, write_to_json, write_to_json_lines, append_dict_to_json, save_dict_to_excel, file_to_list_of_dicts
 from .PgsFile import write_to_excel_normal

 # 5. File/folder process
@@ -45,7 +45,7 @@ from .PgsFile import extract_chinese_punctuation, generate_password, sort_string

 # 7. NLP (natural language processing)
 from .PgsFile import strQ2B_raw, strQ2B_words
-from .PgsFile import ngrams, bigrams, trigrams, everygrams, compute_similarity
+from .PgsFile import ngrams, bigrams, trigrams, everygrams, compute_similarity, perform_liwc_en, perform_liwc_zh
 from .PgsFile import word_list, batch_word_list
 from .PgsFile import cs, cs1, sent_tokenize, word_tokenize, word_tokenize2
 from .PgsFile import word_lemmatize, word_POS, word_NER
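With these re-exports in place, the new helpers should be importable from the package root as well as from the PgsFile.PgsFile submodule, e.g.:

from PgsFile import file_to_list_of_dicts, perform_liwc_en, perform_liwc_zh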
{PgsFile-0.3.6.dist-info → PgsFile-0.3.8.dist-info}/METADATA
CHANGED

@@ -1,7 +1,7 @@
 Metadata-Version: 2.1
 Name: PgsFile
-Version: 0.3.6
-Summary: This module simplifies Python package management, script execution, file handling, web scraping, and multimedia downloads. The module supports LLM-based NLP tasks such as tokenization, lemmatization, POS tagging, NER, dependency parsing, MDD, WSD, and MIP analysis. It also generates word lists and plots data, aiding literary students. Ideal for scraping data, cleaning text, and analyzing language, it offers user-friendly tools to streamline workflows.
+Version: 0.3.8
+Summary: This module simplifies Python package management, script execution, file handling, web scraping, and multimedia downloads. The module supports LLM-based NLP tasks such as tokenization, lemmatization, POS tagging, NER, dependency parsing, MDD, WSD, LIWC, and MIP analysis. It also generates word lists and plots data, aiding literary students. Ideal for scraping data, cleaning text, and analyzing language, it offers user-friendly tools to streamline workflows.
 Home-page: https://mp.weixin.qq.com/s/12-KVLfaPszoZkCxuRd-nQ?token=1589547443&lang=zh_CN
 Author: Pan Guisheng
 Author-email: 895284504@qq.com
@@ -23,6 +23,7 @@ Requires-Dist: pimht
 Requires-Dist: pysbd
 Requires-Dist: nlpir-python
 Requires-Dist: pillow
+Requires-Dist: liwc

 Purpose: This module is designed to make complex tasks accessible and convenient, even for beginners. By providing a unified set of tools, it simplifies the workflow for data collection, processing, and analysis. Whether you're scraping data from the web, cleaning text, or performing LLM-based NLP tasks, this module ensures you can focus on your research without getting bogged down by technical challenges.

@@ -33,7 +34,7 @@ Key Features:
 4. **Data Storage:** Write and append data to text files, Excel, JSON, and JSON lines.
 5. **File and Folder Processing:** Manage file paths, create directories, move or copy files, and search for files with specific keywords.
 6. **Data Cleaning:** Clean text, handle punctuation, remove stopwords, and prepare data for analysis, utilizing valuable corpora and dictionaries such as CET-4/6 vocabulary and BNC-COCA word lists.
-7. **NLP:** Perform word tokenization, lemmatization, POS tagging, NER, dependency parsing, MDD, WSD, and MIP analysis using prepared LLM prompts.
+7. **NLP:** Perform word tokenization, lemmatization, POS tagging, NER, dependency parsing, MDD, WSD, LIWC, and MIP analysis using prepared LLM prompts.
 8. **Math Operations:** Format numbers, convert decimals to percentages, and validate data.
 9. **Visualization:** Process images (e.g., make white pixels transparent) and manage fonts for rendering text.
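The added Requires-Dist: liwc line pulls in the third-party liwc parser package. Note that the new code does import liwc at module level rather than inside a function, so the dependency should be needed even when the LIWC helpers are never called; pip install --upgrade PgsFile, which resolves the new requirement automatically, is presumably the cleanest upgrade path.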
{PgsFile-0.3.6.dist-info → PgsFile-0.3.8.dist-info}/RECORD
CHANGED

@@ -1,5 +1,5 @@
-PgsFile/PgsFile.py,sha256=
-PgsFile/__init__.py,sha256=
+PgsFile/PgsFile.py,sha256=MgXheIxpcTcq6vf7t-Ak6JwVmoamS9ouDuOUDeaYpUY,157289
+PgsFile/__init__.py,sha256=c4v2XTQjk5_ato5qeLzrS6mYm7tHR_V-Pb0QFgJHXJg,3465
 PgsFile/Corpora/Idioms/English_Idioms_8774.txt,sha256=qlsP0yI_XGECBRiPZuLkGZpdasc77sWSKexANu7v8_M,175905
 PgsFile/Corpora/Monolingual/Chinese/People's Daily 20130605/Raw/00000000.txt,sha256=SLGGSMSb7Ff1RoBstsTW3yX2wNZpqEUchFNpcI-mrR4,1513
 PgsFile/Corpora/Monolingual/Chinese/People's Daily 20130605/Raw/00000001.txt,sha256=imOa6UoCOIZoPXT4_HNHgCUJtd4FTIdk2FZNHNBgJyg,3372
@@ -2585,8 +2585,8 @@ PgsFile/models/fonts/博洋行书3500.TTF,sha256=VrgeHr8cgOL6JD05QyuD9ZSyw4J2aIV
 PgsFile/models/fonts/陆柬之行书字体.ttf,sha256=Zpd4Z7E9w-Qy74yklXHk4vM7HOtHuQgllvygxZZ1Hvs,1247288
 PgsFile/models/prompts/1. MIP prompt.txt,sha256=4lHlHmleayRytqr1n9jtt6vn1rQvyf4BKeThpbwI8o8,1638
 PgsFile/models/prompts/2. WSD prompt.txt,sha256=o-ZFtCRUCDrXgm040WTQch9v2Y_r2SIlrZaquilJjgQ,2348
-PgsFile-0.3.6.dist-info/LICENSE,sha256=
-PgsFile-0.3.6.dist-info/METADATA,sha256=
-PgsFile-0.3.6.dist-info/WHEEL,sha256=
-PgsFile-0.3.6.dist-info/top_level.txt,sha256=
-PgsFile-0.3.6.dist-info/RECORD,,
+PgsFile-0.3.8.dist-info/LICENSE,sha256=cE5c-QToSkG1KTUsU8drQXz1vG0EbJWuU4ybHTRb5SE,1138
+PgsFile-0.3.8.dist-info/METADATA,sha256=TmXJtWuYSG4YSbsPylK2gKNip0QtDmd7NCRG84zb6Lk,2925
+PgsFile-0.3.8.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
+PgsFile-0.3.8.dist-info/top_level.txt,sha256=028hCfwhF3UpfD6X0rwtWpXI1RKSTeZ1ALwagWaSmX8,8
+PgsFile-0.3.8.dist-info/RECORD,,
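Each RECORD entry has the form path,sha256=<digest>,size-in-bytes, where the digest is the urlsafe-base64, unpadded SHA-256 of the file, per the wheel spec. A quick sketch for checking an entry against a locally extracted wheel (the path below is illustrative):

import base64, hashlib

def record_hash(path):
    # urlsafe base64 with trailing '=' stripped, as wheel RECORD files use
    with open(path, 'rb') as f:
        digest = hashlib.sha256(f.read()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')

# Should print MgXheIxpcTcq6vf7t-Ak6JwVmoamS9ouDuOUDeaYpUY for the 0.3.8 wheel
print(record_hash('PgsFile/PgsFile.py'))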
{PgsFile-0.3.6.dist-info → PgsFile-0.3.8.dist-info}/LICENSE
File without changes

{PgsFile-0.3.6.dist-info → PgsFile-0.3.8.dist-info}/WHEEL
File without changes

{PgsFile-0.3.6.dist-info → PgsFile-0.3.8.dist-info}/top_level.txt
File without changes