Muyi 0.0.7.tar.gz → 0.0.9.tar.gz

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,101 @@
+ Metadata-Version: 2.4
+ Name: Muyi
+ Version: 0.0.9
+ Summary: Some useful utils.
+ Home-page: https://github.com/Muyiiiii/muyi
+ Author: muyiiiii
+ Author-email:
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Provides-Extra: plot
+ Requires-Dist: matplotlib; extra == "plot"
+ Provides-Extra: csv
+ Requires-Dist: pandas; extra == "csv"
+ Requires-Dist: tqdm; extra == "csv"
+ Provides-Extra: gpu
+ Requires-Dist: GPUtil; extra == "gpu"
+ Provides-Extra: graph
+ Requires-Dist: torch; extra == "graph"
+ Requires-Dist: dgl; extra == "graph"
+ Provides-Extra: all
+ Requires-Dist: matplotlib; extra == "all"
+ Requires-Dist: pandas; extra == "all"
+ Requires-Dist: tqdm; extra == "all"
+ Requires-Dist: GPUtil; extra == "all"
+ Requires-Dist: torch; extra == "all"
+ Requires-Dist: dgl; extra == "all"
+ Dynamic: author
+ Dynamic: classifier
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: home-page
+ Dynamic: license-file
+ Dynamic: provides-extra
+ Dynamic: requires-python
+ Dynamic: summary
+
+ # muyi
+
+ Some useful utils for GNNs and Deep Learning.
+
+ ## Installation
+
+ ```bash
+ pip install muyi # basic installation
+ pip install muyi[plot] # + matplotlib
+ pip install muyi[csv] # + pandas, tqdm
+ pip install muyi[gpu] # + GPUtil
+ pip install muyi[graph] # + torch, dgl
+ pip install muyi[all] # all dependencies
+ ```
+
+ ## utils
+
+ 1. `color_print(content, font_color, bg_color)`
+ 2. `save_pic_iterly(pic_name, postfix, info)`
+ 3. `read_csv_tqdm(path, **kwargs)`
+ 4. `save_result_csv(csv_path, data)`
+ 5. `get_unique_save_path(folder_path, base_name_pattern, start_no)`
+
+ ## graph
+
+ 1. `pyg_data_to_dgl_graph(pyg_data_obj)`
+
+ ## gpu
+
+ 1. `get_gpu_memory_usage()`
+ 2. `display_gpu_memory_usage()`
+
+ ## Upload to PyPI
+
+ ```bash
+ # 1. Install build tools
+ pip install build twine
+
+ # 2. Build package
+ python -m build
+
+ # 3. Upload to PyPI
+ twine upload dist/*
+ # Username: __token__
+ # Password: your PyPI API Token (starts with pypi-)
+ ```
+
+ Get API Token: https://pypi.org/manage/account/token/
+
+ ### Save Token Locally (Optional)
+
+ Create `~/.pypirc` file to avoid entering credentials each time:
+
+ ```ini
+ [pypi]
+ username = __token__
+ password = pypi-your-token-here
+ ```
+
+ - **Linux/macOS**: `~/.pypirc`
+ - **Windows**: `C:\Users\<username>\.pypirc`
@@ -5,6 +5,7 @@ setup.py
  Muyi.egg-info/PKG-INFO
  Muyi.egg-info/SOURCES.txt
  Muyi.egg-info/dependency_links.txt
+ Muyi.egg-info/requires.txt
  Muyi.egg-info/top_level.txt
  muyi/__init__.py
  muyi/gpu.py
@@ -0,0 +1,22 @@
+
+ [all]
+ matplotlib
+ pandas
+ tqdm
+ GPUtil
+ torch
+ dgl
+
+ [csv]
+ pandas
+ tqdm
+
+ [gpu]
+ GPUtil
+
+ [graph]
+ torch
+ dgl
+
+ [plot]
+ matplotlib
muyi-0.0.9/PKG-INFO ADDED
@@ -0,0 +1,101 @@
+ Metadata-Version: 2.4
+ Name: Muyi
+ Version: 0.0.9
+ Summary: Some useful utils.
+ Home-page: https://github.com/Muyiiiii/muyi
+ Author: muyiiiii
+ Author-email:
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Provides-Extra: plot
+ Requires-Dist: matplotlib; extra == "plot"
+ Provides-Extra: csv
+ Requires-Dist: pandas; extra == "csv"
+ Requires-Dist: tqdm; extra == "csv"
+ Provides-Extra: gpu
+ Requires-Dist: GPUtil; extra == "gpu"
+ Provides-Extra: graph
+ Requires-Dist: torch; extra == "graph"
+ Requires-Dist: dgl; extra == "graph"
+ Provides-Extra: all
+ Requires-Dist: matplotlib; extra == "all"
+ Requires-Dist: pandas; extra == "all"
+ Requires-Dist: tqdm; extra == "all"
+ Requires-Dist: GPUtil; extra == "all"
+ Requires-Dist: torch; extra == "all"
+ Requires-Dist: dgl; extra == "all"
+ Dynamic: author
+ Dynamic: classifier
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: home-page
+ Dynamic: license-file
+ Dynamic: provides-extra
+ Dynamic: requires-python
+ Dynamic: summary
+
+ # muyi
+
+ Some useful utils for GNNs and Deep Learning.
+
+ ## Installation
+
+ ```bash
+ pip install muyi # basic installation
+ pip install muyi[plot] # + matplotlib
+ pip install muyi[csv] # + pandas, tqdm
+ pip install muyi[gpu] # + GPUtil
+ pip install muyi[graph] # + torch, dgl
+ pip install muyi[all] # all dependencies
+ ```
+
+ ## utils
+
+ 1. `color_print(content, font_color, bg_color)`
+ 2. `save_pic_iterly(pic_name, postfix, info)`
+ 3. `read_csv_tqdm(path, **kwargs)`
+ 4. `save_result_csv(csv_path, data)`
+ 5. `get_unique_save_path(folder_path, base_name_pattern, start_no)`
+
+ ## graph
+
+ 1. `pyg_data_to_dgl_graph(pyg_data_obj)`
+
+ ## gpu
+
+ 1. `get_gpu_memory_usage()`
+ 2. `display_gpu_memory_usage()`
+
+ ## Upload to PyPI
+
+ ```bash
+ # 1. Install build tools
+ pip install build twine
+
+ # 2. Build package
+ python -m build
+
+ # 3. Upload to PyPI
+ twine upload dist/*
+ # Username: __token__
+ # Password: your PyPI API Token (starts with pypi-)
+ ```
+
+ Get API Token: https://pypi.org/manage/account/token/
+
+ ### Save Token Locally (Optional)
+
+ Create `~/.pypirc` file to avoid entering credentials each time:
+
+ ```ini
+ [pypi]
+ username = __token__
+ password = pypi-your-token-here
+ ```
+
+ - **Linux/macOS**: `~/.pypirc`
+ - **Windows**: `C:\Users\<username>\.pypirc`
muyi-0.0.9/README.md ADDED
@@ -0,0 +1,61 @@
+ # muyi
+
+ Some useful utils for GNNs and Deep Learning.
+
+ ## Installation
+
+ ```bash
+ pip install muyi # basic installation
+ pip install muyi[plot] # + matplotlib
+ pip install muyi[csv] # + pandas, tqdm
+ pip install muyi[gpu] # + GPUtil
+ pip install muyi[graph] # + torch, dgl
+ pip install muyi[all] # all dependencies
+ ```
+
+ ## utils
+
+ 1. `color_print(content, font_color, bg_color)`
+ 2. `save_pic_iterly(pic_name, postfix, info)`
+ 3. `read_csv_tqdm(path, **kwargs)`
+ 4. `save_result_csv(csv_path, data)`
+ 5. `get_unique_save_path(folder_path, base_name_pattern, start_no)`
+
+ ## graph
+
+ 1. `pyg_data_to_dgl_graph(pyg_data_obj)`
+
+ ## gpu
+
+ 1. `get_gpu_memory_usage()`
+ 2. `display_gpu_memory_usage()`
+
+ ## Upload to PyPI
+
+ ```bash
+ # 1. Install build tools
+ pip install build twine
+
+ # 2. Build package
+ python -m build
+
+ # 3. Upload to PyPI
+ twine upload dist/*
+ # Username: __token__
+ # Password: your PyPI API Token (starts with pypi-)
+ ```
+
+ Get API Token: https://pypi.org/manage/account/token/
+
+ ### Save Token Locally (Optional)
+
+ Create `~/.pypirc` file to avoid entering credentials each time:
+
+ ```ini
+ [pypi]
+ username = __token__
+ password = pypi-your-token-here
+ ```
+
+ - **Linux/macOS**: `~/.pypirc`
+ - **Windows**: `C:\Users\<username>\.pypirc`
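The README above lists the helper signatures but does not show a call. Below is a minimal quick-start sketch, assuming the package is installed with the `gpu` extra (`pip install muyi[gpu]`); the argument values are illustrative only.

```python
# Minimal quick-start sketch based on the signatures listed in the README above.
# Assumption: installed via `pip install muyi[gpu]`; the strings here are illustrative.
import muyi

# Colored console output; color names follow the bcolors mapping in muyi.utils.
muyi.color_print("training finished", font_color="white", bg_color="bg_green")

# Print per-GPU memory usage (requires the optional GPUtil dependency).
muyi.display_gpu_memory_usage()
```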
@@ -0,0 +1,35 @@
+ # utils
+ from .utils import (
+     bcolors,
+     color_print,
+     save_pic_iterly,
+     read_csv_tqdm,
+     save_result_csv,
+     get_unique_save_path,
+ )
+
+ # gpu
+ from .gpu import (
+     get_gpu_memory_usage,
+     display_gpu_memory_usage,
+ )
+
+ # graph
+ from .graph import (
+     pyg_data_to_dgl_graph,
+ )
+
+ __all__ = [
+     # utils
+     'bcolors',
+     'color_print',
+     'save_pic_iterly',
+     'read_csv_tqdm',
+     'save_result_csv',
+     'get_unique_save_path',
+     # gpu
+     'get_gpu_memory_usage',
+     'display_gpu_memory_usage',
+     # graph
+     'pyg_data_to_dgl_graph',
+ ]
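This hunk (evidently the package `__init__.py`) re-exports every helper at the top level, so callers can import either the package or individual names. A small sketch of both styles; the CSV path is illustrative and `read_csv_tqdm` needs the `csv` extra:

```python
# Sketch: both import styles work because __init__.py re-exports the names in __all__.
import muyi
from muyi import read_csv_tqdm, get_unique_save_path

muyi.color_print("loading data", font_color="cyan", bg_color="bg_black")
df = read_csv_tqdm("data.csv")                            # illustrative path; needs pandas + tqdm
out_path = get_unique_save_path("./out", "run_{no}.csv")  # illustrative folder and template
print(out_path)
```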
@@ -1,6 +1,5 @@
- import GPUtil
-
  def get_gpu_memory_usage():
+     import GPUtil
      # get GPU usage information
      gpus = GPUtil.getGPUs()
      gpu_memory_info = []
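Moving `import GPUtil` into the function body means the optional dependency is only needed when `get_gpu_memory_usage()` is actually called, so `import muyi` succeeds even without the `gpu` extra. A hedged sketch of how a caller might guard against the missing dependency:

```python
# Sketch of the lazy-import behaviour introduced above (GPUtil may be absent).
import muyi  # importing the package itself no longer requires GPUtil

try:
    gpu_info = muyi.get_gpu_memory_usage()
except ImportError:
    # GPUtil is not installed; enable it with `pip install muyi[gpu]`.
    gpu_info = None

print(gpu_info)
```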
@@ -1,7 +1,6 @@
- import torch
- import dgl
-
  def pyg_data_to_dgl_graph(pyg_data_obj):
+     import torch
+     import dgl
      print(pyg_data_obj)

      # get the edge index
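`torch` and `dgl` are likewise imported lazily now, so the `graph` extra is only required when the conversion actually runs. The full body of `pyg_data_to_dgl_graph` is not visible in this hunk, so the following is only a sketch of the intended call, assuming a PyTorch Geometric `Data` object as input; `torch_geometric` itself is not a declared dependency of muyi.

```python
# Hedged sketch: convert a tiny PyTorch Geometric graph with the helper above.
# Assumes `pip install muyi[graph]` plus torch_geometric; the values are illustrative.
import torch
from torch_geometric.data import Data
from muyi import pyg_data_to_dgl_graph

edge_index = torch.tensor([[0, 1, 2], [1, 2, 0]])   # 3 directed edges on 3 nodes
pyg_data = Data(x=torch.randn(3, 4), edge_index=edge_index)

dgl_graph = pyg_data_to_dgl_graph(pyg_data)
print(dgl_graph)
```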
@@ -0,0 +1,170 @@
+ import os
+ import csv
+ import sys
+
+ class bcolors:
+     HEADER = '\033[95m'     # purple, for headers
+     OKBLUE = '\033[94m'     # blue, for normal or informational messages
+     OKCYAN = '\033[96m'     # cyan (light blue), for informational messages
+     OKGREEN = '\033[92m'    # green, for success or confirmation messages
+     WARNING = '\033[93m'    # yellow, for warnings or important notices
+     FAIL = '\033[91m'       # red, for errors or failures
+     ENDC = '\033[0m'        # reset all styles back to the default color
+     BOLD = '\033[1m'        # bold text
+     UNDERLINE = '\033[4m'   # underlined text
+     BLACK = '\033[30m'      # black
+     RED = '\033[31m'        # red
+     GREEN = '\033[32m'      # green
+     YELLOW = '\033[33m'     # yellow
+     BLUE = '\033[34m'       # blue
+     MAGENTA = '\033[35m'    # magenta
+     CYAN = '\033[36m'       # cyan
+     WHITE = '\033[37m'      # white
+     # background colors
+     BG_BLACK = '\033[40m'   # black background
+     BG_RED = '\033[41m'     # red background
+     BG_GREEN = '\033[42m'   # green background
+     BG_YELLOW = '\033[43m'  # yellow background
+     BG_BLUE = '\033[44m'    # blue background
+     BG_MAGENTA = '\033[45m' # magenta background
+     BG_CYAN = '\033[46m'    # cyan background
+     BG_WHITE = '\033[47m'   # white background
+
+
+ def color_print(content, font_color='white', bg_color='bg_blue'):
+     colors = {
+         'header': bcolors.HEADER,
+         'okblue': bcolors.OKBLUE,
+         'okcyan': bcolors.OKCYAN,
+         'okgreen': bcolors.OKGREEN,
+         'warning': bcolors.WARNING,
+         'fail': bcolors.FAIL,
+         'black': bcolors.BLACK,
+         'red': bcolors.RED,
+         'green': bcolors.GREEN,
+         'yellow': bcolors.YELLOW,
+         'blue': bcolors.BLUE,
+         'magenta': bcolors.MAGENTA,
+         'cyan': bcolors.CYAN,
+         'white': bcolors.WHITE,
+         'bg_black': bcolors.BG_BLACK,
+         'bg_red': bcolors.BG_RED,
+         'bg_green': bcolors.BG_GREEN,
+         'bg_yellow': bcolors.BG_YELLOW,
+         'bg_blue': bcolors.BG_BLUE,
+         'bg_magenta': bcolors.BG_MAGENTA,
+         'bg_cyan': bcolors.BG_CYAN,
+         'bg_white': bcolors.BG_WHITE,
+         'end': bcolors.ENDC,
+     }
+
+     # Apply the background color first, then the font color
+     print(f'{colors[bg_color]}{colors[font_color]}{content}{colors["end"]}\n')
+
+ def save_pic_iterly(pic_name, postfix, info):
+     import matplotlib.pyplot as plt
+
+     pic_idx=1
+     pic_name_full=f'{pic_name}_{pic_idx}.{postfix}'
+
+     while os.path.exists(pic_name_full):
+         print(f'File {pic_name_full} already exists.')
+         pic_idx += 1
+         pic_name_full=f'{pic_name}_{pic_idx}.png'
+
+     plt.savefig(pic_name_full, dpi=300, bbox_inches='tight')
+
+     color_print(f'!!!!! {info} is saved in file {pic_name_full}')
+
+ def read_csv_tqdm(path, **kwargs):
+     import pandas as pd
+     from tqdm import tqdm
+
+     INPUT_FILENAME = path
+     LINES_TO_READ_FOR_ESTIMATION = 20
+     CHUNK_SIZE_PER_ITERATION = 10**5
+
+
+     temp = pd.read_csv(INPUT_FILENAME,
+                        nrows=LINES_TO_READ_FOR_ESTIMATION, **kwargs)
+     N = len(temp.to_csv(index=False))
+     df = [temp[:0]]
+     t = int(os.path.getsize(INPUT_FILENAME)/N*LINES_TO_READ_FOR_ESTIMATION/CHUNK_SIZE_PER_ITERATION) + 1
+
+
+     with tqdm(total = t, file = sys.stdout) as pbar:
+         for i,chunk in enumerate(pd.read_csv(INPUT_FILENAME, chunksize=CHUNK_SIZE_PER_ITERATION, low_memory=False, **kwargs)):
+             df.append(chunk)
+             pbar.set_description('Importing: %d' % (1 + i))
+             pbar.update(1)
+
+     # data = temp[:0].append(df)
+     data = pd.concat(df)
+
+     del df
+     return data
+
+ def save_result_csv(csv_path, data):
+     """Save results to a CSV file (generic).
+
+     Args:
+         csv_path: path of the CSV file
+         data: dict with the data to save; keys are column names, values are the corresponding values
+     """
+     file_exists = os.path.exists(csv_path)
+     headers = list(data.keys())
+     values = list(data.values())
+
+     with open(csv_path, 'a', newline='') as f:
+         writer = csv.writer(f)
+         if not file_exists:
+             writer.writerow(headers)
+         writer.writerow(values)
+
+     print(f"Results saved to {csv_path}")
+
+ def get_unique_save_path(folder_path, base_name_pattern, start_no=1):
+     """Generate a unique save path with an auto-incremented number.
+
+     Checks whether the file already exists and keeps incrementing the number until an unused path is found.
+     If the target folder does not exist, it is created automatically.
+
+     Args:
+         folder_path: target folder path
+         base_name_pattern: file name template; must contain a {no} placeholder for the number,
+             e.g. "result_{no}.csv", "model_v{no}.pt"
+         start_no: starting number, defaults to 1
+
+     Returns:
+         str: a full file path that does not exist yet
+
+     Examples:
+         >>> get_unique_save_path("./output", "result_{no}.csv")
+         './output/result_1.csv'  # if it does not exist yet
+
+         >>> get_unique_save_path("./output", "result_{no}.csv")
+         './output/result_2.csv'  # if result_1.csv already exists
+
+         >>> get_unique_save_path("./models", "checkpoint_v{no}.pt", start_no=10)
+         './models/checkpoint_v10.pt'  # numbering starts at 10
+
+         # Build the template dynamically with an f-string (note: {no} must be escaped with double braces)
+         >>> model_name = "transformer"
+         >>> dataset = "ETTh1"
+         >>> get_unique_save_path("./results", f"{model_name}_{dataset}_{{no}}.csv")
+         './results/transformer_ETTh1_1.csv'
+
+         >>> pred_len = 96
+         >>> get_unique_save_path("./output", f"pred{pred_len}_exp{{no}}.npy")
+         './output/pred96_exp1.npy'
+     """
+     if not os.path.exists(folder_path):
+         os.makedirs(folder_path)
+
+     no = start_no
+     while True:
+         filename = base_name_pattern.format(no=no)
+         full_path = os.path.join(folder_path, filename)
+         if not os.path.exists(full_path):
+             return full_path
+         no += 1
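Taken together, `get_unique_save_path` and `save_result_csv` cover the common pattern of writing one auto-numbered results CSV per experiment run, as the docstrings above describe. A short sketch (paths and values are illustrative):

```python
# Sketch based on the docstrings above: one auto-numbered CSV per run,
# with the column headers written on first use. Paths and values are illustrative.
from muyi import get_unique_save_path, save_result_csv

csv_path = get_unique_save_path("./results", "exp_{no}.csv")   # e.g. ./results/exp_1.csv
save_result_csv(csv_path, {"model": "gcn", "dataset": "cora", "acc": 0.91})
```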
@@ -3,7 +3,7 @@ with open("README.md", "r") as fh:
      long_description = fh.read()
  setuptools.setup(
      name="Muyi", # module name
-     version="0.0.7", # current version
+     version="0.0.9", # current version
      author="muyiiiii", # author
      author_email="", # author email
      description="Some useful utils.", # short description
@@ -19,6 +19,22 @@ setuptools.setup(
      ],
      # dependencies
      install_requires=[
+         # no core dependencies; everything is optional
      ],
+     # optional dependencies
+     extras_require={
+         'plot': ['matplotlib'],     # save_pic_iterly
+         'csv': ['pandas', 'tqdm'],  # read_csv_tqdm
+         'gpu': ['GPUtil'],          # get_gpu_memory_usage
+         'graph': ['torch', 'dgl'],  # pyg_data_to_dgl_graph
+         'all': [                    # install everything
+             'matplotlib',
+             'pandas',
+             'tqdm',
+             'GPUtil',
+             'torch',
+             'dgl',
+         ],
+     },
      python_requires='>=3',
  )
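The new `extras_require` table in setup.py is what produces the `Provides-Extra` / `Requires-Dist` pairs in the 0.0.9 PKG-INFO and the generated `requires.txt` shown earlier. Once the package is installed, the same information can be read back with the standard library; a small sketch (Python 3.8+):

```python
# Sketch: inspect the extras declared by an installed Muyi distribution
# using only the standard library (importlib.metadata, Python 3.8+).
from importlib.metadata import metadata

meta = metadata("Muyi")
print(meta.get_all("Provides-Extra"))   # e.g. ['plot', 'csv', 'gpu', 'graph', 'all']
print(meta.get_all("Requires-Dist"))    # per-extra requirements with environment markers
```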
@@ -1,36 +0,0 @@
- Metadata-Version: 2.1
- Name: Muyi
- Version: 0.0.7
- Summary: Some useful utils.
- Home-page: https://github.com/Muyiiiii/muyi
- Author: muyiiiii
- Author-email:
- Classifier: Programming Language :: Python :: 3
- Classifier: License :: OSI Approved :: MIT License
- Classifier: Operating System :: OS Independent
- Requires-Python: >=3
- Description-Content-Type: text/markdown
- License-File: LICENSE
-
- # muyi
-
- Some useful utils for GNNs and Deep Learning.
-
- ```bash
- pip install muyi
- ```
-
- ## utils
-
- 1. `color_print(content)`
- 2. `save_pic_iterly(pic_name, postfix, info)`
- 3. `read_csv_tqdm(path, **kwargs)`
-
- ## graph
-
- 1. `pyg_data_to_dgl_graph(pyg_data_obj)`
-
- ## gpu
-
- 1. `get_gpu_memory_usage()`
- 2. `display_gpu_memory_usage()`
muyi-0.0.7/PKG-INFO DELETED
@@ -1,36 +0,0 @@
- Metadata-Version: 2.1
- Name: Muyi
- Version: 0.0.7
- Summary: Some useful utils.
- Home-page: https://github.com/Muyiiiii/muyi
- Author: muyiiiii
- Author-email:
- Classifier: Programming Language :: Python :: 3
- Classifier: License :: OSI Approved :: MIT License
- Classifier: Operating System :: OS Independent
- Requires-Python: >=3
- Description-Content-Type: text/markdown
- License-File: LICENSE
-
- # muyi
-
- Some useful utils for GNNs and Deep Learning.
-
- ```bash
- pip install muyi
- ```
-
- ## utils
-
- 1. `color_print(content)`
- 2. `save_pic_iterly(pic_name, postfix, info)`
- 3. `read_csv_tqdm(path, **kwargs)`
-
- ## graph
-
- 1. `pyg_data_to_dgl_graph(pyg_data_obj)`
-
- ## gpu
-
- 1. `get_gpu_memory_usage()`
- 2. `display_gpu_memory_usage()`
muyi-0.0.7/README.md DELETED
@@ -1,22 +0,0 @@
- # muyi
-
- Some useful utils for GNNs and Deep Learning.
-
- ```bash
- pip install muyi
- ```
-
- ## utils
-
- 1. `color_print(content)`
- 2. `save_pic_iterly(pic_name, postfix, info)`
- 3. `read_csv_tqdm(path, **kwargs)`
-
- ## graph
-
- 1. `pyg_data_to_dgl_graph(pyg_data_obj)`
-
- ## gpu
-
- 1. `get_gpu_memory_usage()`
- 2. `display_gpu_memory_usage()`
muyi-0.0.7/muyi/utils.py DELETED
@@ -1,46 +0,0 @@
- import os
- import matplotlib.pyplot as plt
- import pandas as pd
- from tqdm import tqdm
- import sys
-
- def color_print(content):
-     print(f'\033[1;46m{content}\033[0m\n')
-
- def save_pic_iterly(pic_name, postfix, info):
-     pic_idx=1
-     pic_name_full=f'{pic_name}_{pic_idx}.{postfix}'
-
-     while os.path.exists(pic_name_full):
-         print(f'File {pic_name_full} already exists.')
-         pic_idx += 1
-         pic_name_full=f'{pic_name}_{pic_idx}.png'
-
-     plt.savefig(pic_name_full, dpi=300, bbox_inches='tight')
-
-     color_print(f'!!!!! {info} is saved in file {pic_name_full}')
-
- def read_csv_tqdm(path, **kwargs):
-     INPUT_FILENAME = path
-     LINES_TO_READ_FOR_ESTIMATION = 20
-     CHUNK_SIZE_PER_ITERATION = 10**5
-
-
-     temp = pd.read_csv(INPUT_FILENAME,
-                        nrows=LINES_TO_READ_FOR_ESTIMATION, **kwargs)
-     N = len(temp.to_csv(index=False))
-     df = [temp[:0]]
-     t = int(os.path.getsize(INPUT_FILENAME)/N*LINES_TO_READ_FOR_ESTIMATION/CHUNK_SIZE_PER_ITERATION) + 1
-
-
-     with tqdm(total = t, file = sys.stdout) as pbar:
-         for i,chunk in enumerate(pd.read_csv(INPUT_FILENAME, chunksize=CHUNK_SIZE_PER_ITERATION, low_memory=False, **kwargs)):
-             df.append(chunk)
-             pbar.set_description('Importing: %d' % (1 + i))
-             pbar.update(1)
-
-     # data = temp[:0].append(df)
-     data = pd.concat(df)
-
-     del df
-     return data