utilskit 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
utilskit/__init__.py ADDED
@@ -0,0 +1,7 @@
1
+ from . import classificationutils
2
+ from . import dataframeutils
3
+ from . import dbutils
4
+ from . import logutils
5
+ from . import plotutils
6
+ from . import timeutils
7
+ from . import utils
@@ -0,0 +1,143 @@
1
+ '''
2
+ pip install pandas
3
+ '''
4
+ import sys
5
+ import pandas as pd
6
+ import numpy as np
7
+
8
+
9
def get_max_2nd_n_reliability(pred):
    """Return the second-best class index and a reliability score per row.

    Each row of ``pred`` is min-max scaled to [0, 1]; the top prediction
    (scaled value exactly 1) is masked out, the argmax of the remainder is
    the second-best class, and reliability is (1 - runner_up) * 100.
    """
    # Min-max normalise each row to the [0, 1] range.
    row_min = np.expand_dims(np.min(pred, axis=1), axis=1)
    scaled = pred - row_min
    row_max = np.expand_dims(np.max(scaled, axis=1), axis=1)
    scaled = scaled / row_max

    # Mask the top-1 prediction (scaled value 1) so it cannot win argmax.
    scaled = np.where(scaled == 1, -100, scaled)

    # Second-ranked prediction per row.
    max_2nd_index = np.argmax(scaled, axis=1)

    # Reliability: distance between the runner-up and the (masked) top score.
    pred_reliability = (1 - np.max(scaled, axis=1)) * 100
    return max_2nd_index, pred_reliability
24
+
25
+
26
def matrix2confusion(matrix, uni_label_list, round_num=4, show_percentage=True):
    """Build a confusion-matrix DataFrame with per-class metrics.

    Parameters
    ----------
    matrix: list[list[int]]
        Square count matrix (rows = true class, cols = predicted class).
        NOTE: mutated in place — metric columns and a count row are appended.
    uni_label_list: list[str]
        Class names, one per row/column of ``matrix``.
    round_num: int
        Decimal places for the rounded metrics.
    show_percentage: bool
        If True, accuracy/precision/recall are scaled to percentages.

    Returns
    -------
    pandas.DataFrame with the counts plus accuracy / precision / recall /
    'f1 score' / count columns, and a trailing prediction-count row.
    """
    whole_sum = np.sum(matrix)
    true_sum_list = np.sum(matrix, axis=-1).tolist()
    pred_sum_list = np.sum(matrix, axis=-2).tolist()

    per_num = 100 if show_percentage else 1

    correct_sum = 0
    for i in range(len(matrix)):
        correct_count = matrix[i][i]
        correct_sum += correct_count
        pred_sum = pred_sum_list[i]
        true_sum = true_sum_list[i]

        # precision: correct / predicted-as-class (None when never predicted)
        try:
            precision = np.round(correct_count / pred_sum, round_num) * per_num
        except ZeroDivisionError:
            precision = None

        # recall: correct / true-class count (None when class never occurs)
        try:
            recall = np.round(correct_count / true_sum, round_num) * per_num
        except ZeroDivisionError:
            recall = None

        # f1: harmonic mean of precision and recall; None when undefined —
        # either input missing, or both are zero (previously 0/0 produced a
        # silent NaN instead of the None used everywhere else)
        if precision is None or recall is None or (precision + recall) == 0:
            f1_score = None
        else:
            f1_score = np.round(2 * precision * recall / (precision + recall), round_num)

        matrix[i].extend([None, precision, recall, f1_score, true_sum])

    whole_accuracy = np.round(correct_sum / whole_sum, round_num) * per_num

    # index & column labels
    index_list = uni_label_list.copy()
    index_list.append('count')
    column_list = uni_label_list.copy()
    column_list.extend(['accuracy', 'precision', 'recall', 'f1 score', 'count'])

    # trailing row with per-class prediction counts
    pred_count = pred_sum_list + [None] * (len(column_list) - len(index_list))
    matrix.append(pred_count)

    confusion_matrix = pd.DataFrame(matrix, index=index_list, columns=column_list)
    # overall accuracy goes in the first row; .loc avoids the deprecated
    # chained assignment cm['accuracy'][0] = ...
    confusion_matrix.loc[confusion_matrix.index[0], 'accuracy'] = whole_accuracy

    return confusion_matrix
84
+
85
+
86
def make_confusion_matrix(mode, true_list, pred_list, ignore_idx=None, round_num=4, label2id_dict=None, id2label_dict=None, show_percentage=True):
    """Count true/pred pairs into a matrix and return the metric DataFrame.

    mode='label2id': entries are label strings mapped through label2id_dict.
    mode='id2label': entries are integer ids; ids equal to ``ignore_idx``
    (e.g. padding) are skipped.

    Raises ValueError for any other mode (previously an unrelated NameError).
    Returns the DataFrame produced by matrix2confusion.
    """
    if mode == 'label2id':
        uni_label_list = list(label2id_dict.keys())
    elif mode == 'id2label':
        uni_label_list = list(id2label_dict.values())
    else:
        raise ValueError(f"unknown mode: {mode!r} (expected 'label2id' or 'id2label')")

    # square zero matrix, one row/column per label
    size = len(uni_label_list)
    matrix = [[0] * size for _ in range(size)]

    if mode == 'label2id':
        for t, p in zip(true_list, pred_list):
            matrix[label2id_dict[t]][label2id_dict[p]] += 1
    else:  # id2label
        for t_i, p_i in zip(true_list, pred_list):
            # skip ignored ids such as padding
            if (t_i is not None) and (t_i == ignore_idx):
                continue
            matrix[int(t_i)][int(p_i)] += 1

    return matrix2confusion(
        matrix=matrix,
        uni_label_list=uni_label_list,
        round_num=round_num,
        show_percentage=show_percentage
    )
123
+
124
+
125
def reset_confusion_matrix(confusion_matrix, new_label_list, round_num=4, show_percentage=True):
    """Rebuild a confusion matrix restricted to ``new_label_list``.

    Selects the requested label columns and rows from an existing confusion
    matrix and recomputes all metrics via matrix2confusion. Prints a message
    and exits the process when a requested label is absent.
    """
    try:
        selected = confusion_matrix[new_label_list]
    except KeyError:
        print('예측 결과에 존재하지 않는 라벨명을 입력하였습니다.')
        sys.exit()

    # keep only the requested rows as well, then restore the orientation
    selected = selected.T[new_label_list].T
    counts = selected.values.tolist()

    return matrix2confusion(
        matrix=counts,
        uni_label_list=new_label_list,
        round_num=round_num,
        show_percentage=show_percentage
    )
@@ -0,0 +1,273 @@
1
+ import sys
2
+ import os
3
+ from datetime import datetime, timedelta
4
+ import numpy as np
5
+ import pandas as pd
6
+ import csv
7
+ import warnings
8
+ warnings.filterwarnings('ignore')
9
+
10
+ from utilskit import utils as u
11
+
12
+
13
def read_df(path):
    """Load a table from ``path`` based on its file extension.

    csv/CSV  -> pandas.read_csv, trying utf-8-sig then cp949; a malformed
                file (ParserError) is re-read row by row with the csv module
                and the first row becomes the header.
    xlsx/xls -> pandas.read_excel
    txt      -> one line per row in a single 'string' column

    Raises ValueError for any other extension, and re-raises the last
    UnicodeDecodeError when no candidate encoding works (the original
    looped forever retrying cp949).
    """
    extention = path.split('.')[-1]

    if extention in ('csv', 'CSV'):
        data_df = _read_csv_df(path)
    elif extention in ('xlsx', 'xls'):
        data_df = pd.read_excel(path)
    elif extention == 'txt':
        # one newline-stripped line per row
        with open(path, 'r', encoding='utf-8-sig') as f:
            line_list = [line.replace('\n', '') for line in f.readlines()]
        data_df = pd.DataFrame(line_list, columns=['string'])
    else:
        raise ValueError(f'{extention}은(는) 잘못되거나 지정되지 않은 확장자입니다.')
    return data_df


def _read_csv_df(path):
    """Read a csv with encoding fallback and a ParserError recovery path."""
    last_error = None
    for encoding in ('utf-8-sig', 'cp949'):
        try:
            return pd.read_csv(path, encoding=encoding)
        except UnicodeDecodeError as err:
            last_error = err  # try the next encoding
        except pd.errors.ParserError:
            # Ragged rows: read manually, promote the first row to header.
            with open(path, encoding=encoding) as f:
                csv_list = [line for line in csv.reader(f)]
            data_df = pd.DataFrame(csv_list)
            data_df.columns = data_df.iloc[0].to_list()
            return data_df.drop(index=data_df.index[0])
    raise last_error
55
+
56
+
57
def utc2kor(df, time_column='time'):
    """Convert an ISO UTC time column ('...T...Z') to KST strings (+9h).

    Returns the frame sorted ascending by the converted column; an empty
    frame is returned untouched.
    """
    if df.empty:
        return df

    def _to_kst(value):
        # '2020-01-01T00:00:00Z' -> '2020-01-01 00:00:00', then add 9 hours
        cleaned = str(value).replace('T', ' ').replace('Z', '')
        return str(datetime.strptime(cleaned, '%Y-%m-%d %H:%M:%S') + timedelta(hours=9))

    df[time_column] = df[time_column].apply(_to_kst)
    return df.sort_values(by=time_column, ascending=True)
72
+
73
+
74
+ # def dataframe_preprocessor(df,
75
+ # max_dict=None, min_dict=None,
76
+ # nan_drop_column=None,
77
+ # do_nan_fill_whole=False
78
+ # ):
79
+ # # ========================================================================
80
+ # # # 최대값 초과 --> 이상치 --> 결측치
81
+ # # if max_dict is not None:
82
+ # # df = maxadnormal2nan(df=df, max_dict=max_dict)
83
+
84
+ # # # 최소값 미만 --> 이상치 --> 결측치
85
+ # # if min_dict is not None:
86
+ # # df = minadnormal2nan(df=df, min_dict=min_dict)
87
+
88
+ # # ========================================================================
89
+ # # 특정 컬럼 기준 NaN 제거
90
+ # # if nan_drop_column is not None:
91
+ # # df = drop_nan(
92
+ # # df=df,
93
+ # # base_column=nan_drop_column
94
+ # # )
95
+
96
+ # # ========================================================================
97
+ # # 전후값 채우기
98
+ # if do_nan_fill_whole:
99
+ # df = df.fillna(method='ffill')
100
+ # df = df.fillna(method='bfill')
101
+
102
+ # return df
103
+
104
+
105
def adnormal2nan(df, stan_col, max_value=None, min_value=None):
    """Replace out-of-range values in ``stan_col`` with NaN, in place.

    Values strictly above ``max_value`` or strictly below ``min_value``
    become NaN. Uses .loc instead of the original chained indexing
    (df[col][mask] = ...), which is unreliable and removed in pandas 3.
    """
    if max_value is not None:
        df.loc[df[stan_col] > max_value, stan_col] = np.nan
    if min_value is not None:
        df.loc[df[stan_col] < min_value, stan_col] = np.nan
    return df
111
+
112
+
113
def time_filling(df, start, end, time_column='time'):
    """Right-join ``df`` onto a full 1-second time grid from start to end.

    Seconds missing from ``df`` appear as rows with NaN in every other
    column; an empty frame is returned untouched.
    """
    if df.empty:
        return df

    grid = pd.DataFrame(pd.date_range(start=start, end=end, freq='S'),
                        columns=[time_column]).astype('str')
    return pd.merge(df, grid, how='right')
124
+
125
+
126
def drop_nan(df, stan_col):
    """Drop rows where ``stan_col`` is NaN; an unknown column is a no-op."""
    try:
        return df.dropna(subset=[stan_col])
    except KeyError:
        return df
132
+
133
+
134
def isdfvalid(df, valid_column_list):
    """Return True when every column in ``valid_column_list`` exists in df."""
    try:
        df[valid_column_list]
    except KeyError:
        return False
    return True
141
+
142
+
143
def local_nan_correction(df, stan_col, nan_repeat=5):
    '''
    Fill short NaN runs in ``stan_col`` using forward- then backward-fill.

    Runs of consecutive NaNs are located with
    utils.identify_stan_repeat_section (mode='below' with
    stan_repeat=nan_repeat — presumably "runs no longer than nan_repeat";
    TODO confirm against utils). Each run is ffilled from the value just
    before it and bfilled from the value just after it, in the frame itself.
    '''
    stan_ary = df[stan_col].values
    nan_start_idx_list, nan_end_idx_list = u.identify_stan_repeat_section(
        ary=stan_ary,
        stan_value='nan',
        stan_repeat=nan_repeat,
        mode='below',
        reverse=False
    )
    # include one sample before (ffill source) / after (bfill source) each run
    for nan_si, nan_ei in zip(nan_start_idx_list, nan_end_idx_list):
        df.loc[nan_si-1:nan_ei, stan_col] = df.loc[nan_si-1:nan_ei, stan_col].fillna(method='ffill')
        df.loc[nan_si:nan_ei+1, stan_col] = df.loc[nan_si:nan_ei+1, stan_col].fillna(method='bfill')

    return df
161
+
162
+
163
def pin_nan_correction(df, stan_col, max_diff=0.1, nan_repeat=3):
    '''
    Turn short "pin" spikes in ``stan_col`` into NaN and fill them.

    A pin is a value that lies inside the allowed range but jumps away from
    the local trend, e.g.:

        input : 20, 20, 20, 20, [ 1], 20, 20, 20, 1, 1, 2, 1
        result: 20, 20, 20, 20, [NaN], 20, 20, 20, 1, 1, 2, 1

    Detection: a sample is a pin candidate when its absolute step from the
    previous sample exceeds ``max_diff`` while the means of the ~10 samples
    before and after it stay close. Candidates (plus a small neighbourhood)
    are blanked to NaN in a scratch array, the resulting NaN runs are
    located via utils.identify_stan_repeat_section, and those spans are
    ffilled/bfilled in the frame.
    '''

    # column under inspection
    stan_ary = df[stan_col].values

    # shifted-by-one copy so that diff_ary[i] == |x[i] - x[i-1]| (first is 0)
    stan_1_list = stan_ary.tolist()
    stan_1_list.insert(0, stan_ary[0])
    stan_1_ary = np.array(stan_1_list)[:-1]
    diff_ary = np.round(stan_ary - stan_1_ary, 4)
    diff_ary = np.array(list(map(abs, diff_ary)))

    idx_list = []
    for idx, diff in enumerate(diff_ary):

        # step smaller than the allowed jump: not a pin candidate
        if diff < max_diff:
            continue

        # mean of the 10 samples before idx
        before_aver = np.average(stan_ary[idx-10:idx])

        # mean of the 10 samples after idx
        after_aver = np.average(stan_ary[idx+1:idx+11])

        # if one window contains NaN, treat both sides as equal
        if str(before_aver) == 'nan':
            before_aver = after_aver
        if str(after_aver) == 'nan':
            after_aver = before_aver

        # how much the local trend itself changes around idx
        aver_diff = abs(after_aver - before_aver)

        # jump size not explained by the trend change
        p = np.round(diff - aver_diff, 4)

        # a large unexplained jump marks a pin
        if p > max_diff:
            idx_list.append(idx)

    del idx

    # blank a small neighbourhood around each detected pin in a scratch copy
    temp_ary = stan_ary.copy()
    if len(idx_list) > 0:
        for idx in idx_list:
            if idx < 3:
                temp_ary[:idx+3] = np.nan
            else:
                temp_ary[idx-3:idx+3] = np.nan

    # locate the NaN runs to be filled
    # NOTE(review): source indentation was lost in extraction — this section
    # may originally have been nested under the `if idx_list` branch; confirm.
    for_fill_start_idx_list, for_fill_end_idx_list = u.identify_stan_repeat_section(
        ary=temp_ary,
        stan_value='nan',
        stan_repeat=nan_repeat,
        mode='below',
        reverse=False
    )

    # blank each span in the frame, then ffill/bfill it from the neighbours
    for fsi, fei in zip(for_fill_start_idx_list, for_fill_end_idx_list):
        df.loc[fsi:fei, stan_col] = np.nan
        df.loc[fsi-1:fei, stan_col] = df.loc[fsi-1:fei, stan_col].fillna(method='ffill')
        df.loc[fsi:fei+1, stan_col] = df.loc[fsi:fei+1, stan_col].fillna(method='bfill')

    return df
utilskit/dbutils.py ADDED
@@ -0,0 +1,159 @@
1
+ # DB
2
+ import pandas as pd
3
+ import pymysql
4
+ from sqlalchemy import create_engine
5
+
6
+
7
+ # def get_info():
8
+ # db_host = '192.168.0.85'
9
+ # db_port = 3306
10
+ # db_user = 'theimc'
11
+ # db_passward = 'theimc#10!'
12
+ # db_name = 'BUSMONITORING'
13
+ # charset = 'utf8mb4'
14
+ # if_exists = 'append'
15
+ # autocommit = True
16
+ # return (db_host, db_port, db_user, db_passward, db_name, charset, if_exists, autocommit)
17
+
18
+
19
+ # def get_info_p5000():
20
+ # db_host = "59.25.131.135"
21
+ # db_port = 3306
22
+ # db_user = "ai_m"
23
+ # db_password = "temp"
24
+ # db_name = "bus"
25
+ # charset = 'utf8mb4'
26
+ # if_exists = 'append'
27
+ # autocommit = True
28
+ # return (db_host, db_port, db_user, db_password, db_name, charset, if_exists ,autocommit)
29
+
30
+
31
def db_connect(host, user, port, passward, name, charset='utf8mb4', if_exists='append', autocommit=True):
    """Open and return a pymysql connection.

    ``if_exists`` is accepted for call-site compatibility but unused here
    (it only matters for pd2db). The parameter name 'passward' [sic] is
    kept because callers pass it by keyword.
    """
    return pymysql.connect(
        host=host,
        user=user,
        port=port,
        password=passward,
        db=name,
        charset=charset,
        autocommit=autocommit
    )
50
+
51
+
52
def select_db(conn, query, where=None):
    """Run ``query`` on ``conn`` and return all fetched rows.

    ``where`` is accepted for signature compatibility but ignored — the
    caller is expected to embed any WHERE clause in ``query`` itself.
    Removes the dead `query = query` line and closes the cursor even when
    execute() raises.
    """
    cursor = conn.cursor()
    try:
        cursor.execute(query)
        info = cursor.fetchall()
    finally:
        cursor.close()
    return info
59
+
60
+
61
def delete_db(db_info_dict, table, where=None):
    """Delete rows from ``table``; all rows when ``where`` is None.

    Bug fix: db_connect takes individual arguments, so the dict must be
    unpacked — the original ``db_connect(db_info_dict)`` raised TypeError
    on every call. NOTE(review): table/where are interpolated into the SQL
    string; never pass untrusted input here.
    """
    conn = db_connect(**db_info_dict)
    cursor = conn.cursor()
    if where:
        query = f"""
        DELETE FROM {table}
        where {where}
        """
    else:
        query = f"""
        DELETE FROM {table}
        """
    try:
        cursor.execute(query)
    finally:
        cursor.close()
74
+
75
+
76
def update_db(db_info_dict, table, set_, where):
    """UPDATE ``table`` SET ``set_`` WHERE ``where``.

    Bug fix: unpack the info dict into db_connect (the original passed the
    dict as one positional arg and always raised TypeError).
    NOTE(review): identifiers/clauses are string-interpolated; never pass
    untrusted input here.
    """
    conn = db_connect(**db_info_dict)
    cursor = conn.cursor()
    query = f"""
    update {table}
    set {set_}
    where {where}
    """
    try:
        cursor.execute(query)
    finally:
        cursor.close()
85
+
86
+
87
+
88
def pd2db(db_host, db_port, db_user, db_passward,
          db_name, charset, if_exists, autocommit,
          df, table, encoding='utf-8-sig', index=False):
    """Write DataFrame ``df`` into MySQL table ``table`` via SQLAlchemy.

    ``encoding`` and ``autocommit`` are accepted for backward compatibility
    but unused (create_engine no longer takes an encoding argument).
    ``if_exists`` is forwarded to DataFrame.to_sql.
    """
    url = f"mysql+pymysql://{db_user}:{db_passward}@{db_host}:{db_port}/{db_name}?charset={charset}"
    engine = create_engine(url)
    conn = engine.connect()
    df.to_sql(name=table, con=engine, if_exists=if_exists, index=index)
    conn.close()
98
+
99
+
100
+ # def pd2db(db_info_dict, df, table, encoding='utf-8-sig', index=False):
101
+ # db_host = db_info_dict['host']
102
+ # db_port = db_info_dict['port']
103
+ # db_user = db_info_dict['user']
104
+ # db_passward = db_info_dict['passward']
105
+ # db_name = db_info_dict['name']
106
+ # charset = db_info_dict['charset']
107
+ # if_exists = db_info_dict['if_exists']
108
+ # autocommit = db_info_dict['autocommit']
109
+ # url = f"mysql+pymysql://{db_user}:{db_passward}@{db_host}:{db_port}/{db_name}?charset={charset}"
110
+ # # engine = create_engine(url, encoding=encoding)
111
+ # engine = create_engine(url)
112
+ # conn = engine.connect()
113
+ # df.to_sql(name=table, con=engine, if_exists=if_exists, index=index)
114
+ # conn.close()
115
+
116
+
117
+ """
118
+ def main():
119
+ db_host = '211.195.9.226'
120
+ db_port = 3306
121
+ db_user = 'root'
122
+ db_passward = 'theimc#10!'
123
+ db_name = 'flagship'
124
+ charset = 'utf8mb4'
125
+ table = 'test'
126
+ if_exists = 'append'
127
+ autocommit = True
128
+
129
+ df = pd.read_csv('D:/python/project/temp/example.csv', encoding='utf-8-sig')
130
+
131
+ pd2db(
132
+ db_user=db_user,
133
+ db_passward=db_passward,
134
+ db_host=db_host,
135
+ db_port=db_port,
136
+ db_name=db_name,
137
+ charset=charset,
138
+ df=df,
139
+ table=table,
140
+ if_exists=if_exists,
141
+ encoding='utf-8-sig',
142
+ index=False
143
+ )
144
+
145
+
146
+ if __name__ == '__main__':
147
+ main()
148
+
149
+ """
150
+
151
+
152
+
153
+
154
+
155
+
156
+
157
+
158
+
159
+
utilskit/logutils.py ADDED
@@ -0,0 +1,109 @@
1
+ import sys
2
+ import os
3
+ from tqdm import tqdm
4
+ import shutil
5
+ import warnings
6
+ warnings.filterwarnings('ignore')
7
+
8
+
9
def get_logger(get, save_path, log_file_name, time_handler=True, console_display=False, logging_level='info'):
    '''
    Build and return a configured logger.

    parameters
    ----------
    get: str
        Name used to create/fetch the logger.

    save_path: str
        Directory for the log file (created if missing).

    log_file_name: str
        File name for the log file inside ``save_path``.

    time_handler: bool (default: True)
        Rotate at midnight keeping 30 dated backups, instead of a plain file.

    console_display: bool (default: False)
        Also echo records to the console.

    logging_level: str
        One of notset/debug/info/warning/error/critical; any other value
        leaves the logger's level untouched.

    returns
    -------
    logger: logging.Logger
    '''
    import logging
    from logging import handlers
    os.makedirs(save_path, exist_ok=True)

    logger = logging.getLogger(get)
    level_map = {
        'critical': logging.CRITICAL,
        'error': logging.ERROR,
        'warning': logging.WARNING,
        'info': logging.INFO,
        'debug': logging.DEBUG,
        'notset': logging.NOTSET,
    }
    if logging_level in level_map:
        logger.setLevel(level_map[logging_level])

    formatter = logging.Formatter('%(asctime)s level:%(levelname)s %(filename)s line %(lineno)d %(message)s')

    if console_display:
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(formatter)
        logger.addHandler(stream_handler)

    if time_handler:
        # roll the file over at midnight, keeping 30 dated backups
        file_handler = handlers.TimedRotatingFileHandler(
            filename=f'{save_path}/{log_file_name}',
            when="midnight",
            interval=1,
            backupCount=30,
            encoding="utf-8")
        file_handler.suffix = '%Y%m%d'
    else:
        file_handler = logging.FileHandler(f'{save_path}/{log_file_name}')

    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    return logger
74
+
75
+
76
def log_sort(log_path):
    """Archive rotated log files into '{log_path}_history/YYYY/MM/<name>/'.

    Files named '<name>.<YYYYMMDD>' are moved into a year/month/name folder
    tree; plain '*.log' files (the active logs) are left in place.
    """
    os.makedirs(f'{log_path}_history', exist_ok=True)

    # group the rotated files by their base log name, in sorted order
    log_dict = {}
    for log_file in sorted(os.listdir(log_path)):
        date = log_file.split('.')[-1]
        if date == 'log':
            continue  # active log, not a dated backup
        log_dict.setdefault(log_file.split('.')[0], []).append(log_file)

    for log_name, log_list in log_dict.items():
        for log_file in tqdm(log_list):
            date = log_file.split('.')[-1]
            yyyy = date[:4]
            mm = date[4:6]
            move_path = f'{log_path}_history/{yyyy}/{mm}/{log_name}'
            os.makedirs(move_path, exist_ok=True)
            shutil.move(
                f'{log_path}/{log_file}',
                f'{move_path}/{log_file}'
            )
105
+
106
+
107
if __name__ == "__main__":
    # Manual entry point: set root_path to a log directory before running.
    # NOTE(review): with the empty default this operates on the current
    # directory's '' path — confirm before use.
    root_path = ''
    log_sort(root_path)
utilskit/plotutils.py ADDED
@@ -0,0 +1,278 @@
1
+ import sys
2
+ import os
3
+ import matplotlib.pyplot as plt
4
+
5
# Friendly colour name -> matplotlib colour spec (None = matplotlib default).
COLOR_DICT = {
    'default':None,
    'blue':'b',
    'green':'g',
    'red':'r',
    'cyan':'c',
    'magenta':'m',
    'yellow':'y',
    'black':'k',
    'white':'w',
    'orange':'#ffa500',
    'pink':'#ffc0cb',
    'khaki':'#f0e68c',
    'gold':'#ffd700',
    'skyblue':'#87ceeb',
    'navy':'#000080',
    'lightgreen':'#90ee90',
    'olive':'#808000',
    'violet':'#ee82ee',
    'gray':'#808080',
    'brown':'#a52a2a'
    }

# Friendly line-style name -> matplotlib linestyle string.
LINE_DICT = {
    'default':'-',
    'None':'None',
    'line':'-',
    'dash':'--',
    'dot':':',
    'dash-dot':'-.'
    }

# Friendly marker name -> matplotlib marker symbol (None = no marker).
MARKER_DICT = {
    'default':None,
    'None':None,
    'dot':'.',
    'pixel':',',
    'circle':'o',
    'triangle_down':'v',
    'triangle_up':'^',
    'triangle_left':'<',
    'triangle_right':'>',
    'tri_down':'1',
    'tri_up':'2',
    'tri_left':'3',
    'tri_right':'4',
    'square':'s',
    'pentagon':'p',
    'star':'*',
    'hexagon1':'h',
    'hexagon2':'H',
    'plus':'+',
    'x':'x',
    'diamond':'D',
    'thin_diamond':'d'
    }
61
+
62
def get_style(line_style='default', line_size='default', line_color='default',
              marker_style='default', marker_size='default', marker_color='default',
              marker_border_size='default', marker_border_color='default'):
    """Translate friendly style names into matplotlib plot() kwargs.

    Unknown names fall back to defaults ('-' for the line style, None
    elsewhere); a 'default' size also maps to None so matplotlib picks.

    Returns the tuple (ls, lw, c, marker, ms, mfc, mew, mec).
    """
    ls = LINE_DICT.get(line_style, '-')                     # line style
    lw = None if line_size == 'default' else line_size      # line width
    c = COLOR_DICT.get(line_color)                          # line colour
    marker = MARKER_DICT.get(marker_style)                  # marker symbol
    ms = None if marker_size == 'default' else marker_size  # marker size
    mfc = COLOR_DICT.get(marker_color)                      # marker face colour
    mew = None if marker_border_size == 'default' else marker_border_size  # marker edge width
    mec = COLOR_DICT.get(marker_border_color)               # marker edge colour

    return ls, lw, c, marker, ms, mfc, mew, mec
116
+
117
+
118
def draw_plot(title, x, y, title_font_size=13, x_font_size=13, y_font_size=13,
              line_style='default', line_size='default', line_color='default',
              marker_style='default', marker_size='default', marker_color='default',
              marker_border_size='default', marker_border_color='default',
              add_x_list=None, add_y_list=None, add_color_list=None,
              fig_size=None, x_range=None, y_range=None,
              focus_start_list=None, focus_end_list=None, focus_color_list=None, alpha_list=None,
              label=None, save_path=None):
    """Draw one x/y line plot, optionally with extra series and shaded
    x-spans, and save it to '<save_path>/<title>.png' when save_path is set.

    Style arguments are friendly names resolved through get_style; unknown
    names fall back to matplotlib defaults. add_x_list/add_y_list draw
    additional series on the same axes; focus_*_list shade vertical spans.
    The figure is always closed at the end (plt.close('all')).
    """

    ls, lw, c, marker, ms, mfc, mew, mec = get_style(
        line_style=line_style,
        line_size=line_size,
        line_color=line_color,
        marker_style=marker_style,
        marker_size=marker_size,
        marker_color=marker_color,
        marker_border_size=marker_border_size,
        marker_border_color=marker_border_color
    )

    if fig_size:
        plt.figure(figsize=fig_size)

    # axis limits
    if x_range:
        x_min = x_range[0]
        x_max = x_range[1]
        plt.xlim(x_min, x_max)  # (min, max)
    if y_range:
        y_min = y_range[0]
        y_max = y_range[1]
        plt.ylim(y_min, y_max)

    plt.title(title, fontdict={'fontsize':title_font_size})

    # tick label sizes
    plt.xticks(fontsize=x_font_size)
    plt.yticks(fontsize=y_font_size)

    # main series
    plt.plot(
        x, y,
        ls=ls,
        lw=lw,
        c=c,
        marker=marker,
        ms=ms,
        mfc=mfc,
        mew=mew,
        mec=mec,
        label=label
    )

    # legend (axis labels are fixed placeholder strings)
    if label is not None:
        plt.xlabel('xlabel')
        plt.ylabel('ylabel')
        plt.legend()

    # additional series on the same axes

    if add_x_list is not None and add_y_list is not None:

        if add_color_list is None:
            for add_x, add_y in zip(add_x_list, add_y_list):
                plt.plot(
                    add_x, add_y,
                    ls=ls,
                    marker=marker
                )
        else:
            for add_x, add_y, add_color in zip(add_x_list, add_y_list, add_color_list):
                _, _, add_c, _, _, _, _, _ = get_style(line_color=add_color)
                plt.plot(
                    add_x, add_y,
                    ls=ls,
                    marker=marker,
                    c=add_c
                )

    # shaded vertical focus spans
    if focus_start_list is not None and focus_end_list is not None:
        if focus_color_list is None:
            focus_color_list = ['gray'] * len(focus_start_list)
        if alpha_list is None:
            alpha_list = [0.2] * len(focus_start_list)
        for focus_start, focus_end, focus_c, alpha in zip(focus_start_list, focus_end_list, focus_color_list, alpha_list):
            plt.axvspan(focus_start, focus_end, facecolor=focus_c, alpha=alpha)

    if save_path:
        os.makedirs(save_path, exist_ok=True)
        plt.savefig(f'{save_path}/{title}.png')

    plt.close('all')
213
+
214
+
215
def draw_subplot(image_title, sub_row_idx, sub_col_idx,
                 title_list, x_list, y_list,
                 title_font_size=13, x_font_size=13, y_font_size=13,
                 x_range_list=None, y_range_list=None,
                 fig_size=None,
                 focus_start_list=None, focus_end_list=None, focus_color_list=None, alpha_list=None,
                 label=None, save_path=None):
    """Draw a sub_row_idx x sub_col_idx grid of line plots and optionally
    save the figure as '<save_path>/<image_title>.png'.

    title_list/x_list/y_list supply one entry per subplot (row-major order
    of axs.flat). x_range_list/y_range_list optionally fix the per-subplot
    axis limits; focus_*_list shade vertical spans on each subplot.
    ``label`` is accepted for signature compatibility but unused.

    Bug fix: the original passed fontsize= to Axes.set_xlim, which raises
    TypeError whenever x_range_list was given — tick-label sizes are
    already handled by tick_params.
    """
    if fig_size is not None:
        fig, axs = plt.subplots(sub_row_idx, sub_col_idx, figsize=fig_size)
    else:
        fig, axs = plt.subplots(sub_row_idx, sub_col_idx)

    for i, ax in enumerate(axs.flat):
        ax.plot(x_list[i], y_list[i])
        ax.set_title(title_list[i], fontsize=title_font_size)

        if x_range_list is not None:
            ax.set_xlim(x_range_list[i])

        if y_range_list is not None:
            ax.set_ylim(y_range_list[i])
        ax.tick_params(axis='x', labelsize=x_font_size)
        ax.tick_params(axis='y', labelsize=y_font_size)

        # shaded vertical focus spans on this subplot
        if focus_start_list is not None and focus_end_list is not None:
            if focus_color_list is None:
                focus_color_list = ['gray'] * len(focus_start_list)
            if alpha_list is None:
                alpha_list = [0.2] * len(focus_start_list)
            for focus_start, focus_end, focus_c, alpha in zip(focus_start_list, focus_end_list, focus_color_list, alpha_list):
                ax.axvspan(focus_start, focus_end, facecolor=focus_c, alpha=alpha)

    plt.tight_layout()

    if save_path:
        os.makedirs(save_path, exist_ok=True)
        plt.savefig(f'{save_path}/{image_title}.png')

    plt.close(fig)
274
+ # plt.clf()
275
+
276
+
277
+
278
+
utilskit/timeutils.py ADDED
@@ -0,0 +1,40 @@
1
+ from datetime import date, datetime, timedelta
2
+ import time
3
+
4
+
5
+ # 오늘 날짜 추출
6
def get_now(format_string='년-월-일 시:분:초'):
    """Format the current local time using Korean placeholder characters.

    년/월/일/시/분/초 map to %Y/%m/%d/%H/%M/%S; every other character in
    ``format_string`` is kept verbatim in the output.
    """
    replacements = (('년', '%Y'), ('월', '%m'), ('일', '%d'),
                    ('시', '%H'), ('분', '%M'), ('초', '%S'))
    for token, directive in replacements:
        format_string = format_string.replace(token, directive)
    return datetime.now().strftime(format_string)
16
+
17
+
18
def time_measure(start):
    """Return (hours, minutes, seconds) elapsed since ``start`` (a time.time() value)."""
    elapsed = int(time.time() - start)
    hours, remainder = divmod(elapsed, 3600)
    minutes, seconds = divmod(remainder, 60)
    return hours, minutes, seconds
24
+
25
+
26
def get_date_list(schedule, year, mon_list, start_day_list, end_day_list):
    """Build a list of 'YYYY-MM-DD' strings.

    schedule=True  -> just today's date (the other arguments are ignored).
    schedule=False -> for each month in ``mon_list``, the days from
    start_day_list[mon-1] through end_day_list[mon-1] inclusive.
    """
    if schedule:
        return [str(date.today())]

    date_list = []
    for mon in mon_list:
        first = start_day_list[mon-1]
        last = end_day_list[mon-1]
        date_list.extend(
            f'{year}-{str(mon).zfill(2)}-{str(day).zfill(2)}'
            for day in range(first, last + 1)
        )
    return date_list
utilskit/utils.py ADDED
@@ -0,0 +1,277 @@
1
+ '''
2
+ pip install xlrd
3
+ '''
4
+ import numpy as np
5
+ import pandas as pd
6
+ import shutil
7
+ import os
8
+ import sys
9
+ import json
10
+ import time
11
+ import csv
12
+ from tqdm import tqdm
13
+ from datetime import date, datetime, timedelta
14
+
15
+
16
def save_yaml(path, obj):
    """Serialize *obj* to a YAML file at *path*, preserving key order."""
    import yaml

    with open(path, 'w') as outfile:
        yaml.dump(obj, outfile, sort_keys=False)
20
+
21
+
22
def load_yaml(path):
    """Deserialize and return the YAML document stored at *path*."""
    import yaml

    with open(path, 'r') as infile:
        document = yaml.load(infile, Loader=yaml.FullLoader)
    return document
26
+
27
+
28
def envs_setting(random_seed):
    """Fix every RNG seed (torch, numpy, random) for reproducibility.

    parameters
    ----------
    random_seed: int
        seed value applied to every random number generator

    returns
    -------
    None.  Global RNG state of torch / cudnn / numpy / random is seeded
    as a side effect.
    """
    import random

    import numpy as np
    import torch
    import torch.backends.cudnn as cudnn

    # Seed every generator the project may rely on (CUDA seeding is a
    # no-op on CPU-only machines).
    torch.manual_seed(random_seed)
    torch.cuda.manual_seed(random_seed)
    torch.cuda.manual_seed_all(random_seed)
    # Disable cudnn auto-tuning so convolution algorithm choice is
    # deterministic across runs.
    cudnn.benchmark = False
    cudnn.deterministic = True
    np.random.seed(random_seed)
    random.seed(random_seed)
56
+
57
+
58
+
59
def normalize_1D(ary):
    '''
    Min-max normalize a 1-D array into the range [0, 1].

    parameters
    ----------
    ary: array-like
        1-D sequence of numbers to normalize

    returns
    -------
    numpy array scaled so that min -> 0.0 and max -> 1.0.
    A constant (or single-element) input returns an all-zero array
    instead of dividing by zero (the original produced NaN here).
    Non-1-D input prints a message and returns None, matching the
    original behavior.
    '''
    ary = np.asarray(ary, dtype=float)

    if ary.ndim > 1:
        # Keep the original message ("only 1-D data can be supplied") and
        # the None return for callers that rely on it.
        return print('1 차원 데이터만 입력 가능')

    shifted = ary - np.min(ary)
    span = np.max(shifted)
    if span == 0:
        # BUGFIX: all values identical -> original divided by zero and
        # returned NaN with a RuntimeWarning; define the result as zeros.
        return shifted

    return shifted / span
83
+
84
+
85
def get_error_info():
    """Return the formatted traceback of the exception being handled.

    Intended to be called inside an ``except`` block; outside of one,
    ``traceback.format_exc`` yields the string ``'NoneType: None\\n'``.
    """
    import traceback

    return traceback.format_exc()
89
+
90
+
91
def read_jsonl(data_path):
    """Load a JSON / JSONL file, retrying with cp949 when utf-8-sig fails.

    Delegates the actual parsing to ``validate_data``; the cp949 retry
    covers files produced on Korean-locale Windows systems.
    """
    try:
        return validate_data(data_path=data_path, encoding='utf-8-sig')
    except UnicodeDecodeError:
        return validate_data(data_path=data_path, encoding='cp949')
105
+
106
+
107
def validate_data(data_path, encoding):
    """Parse *data_path* as one JSON document, or line-by-line as JSONL.

    ``json.load`` is tried on the whole file first; when that raises
    ``JSONDecodeError`` the file is re-read and each non-empty line that
    ends with ``'}'`` is parsed individually.

    parameters
    ----------
    data_path: str
        path of the file to parse
    encoding: str
        text encoding used to open the file

    returns
    -------
    list.  For a whole-file document the loaded object is appended as a
    single element; in JSONL mode each parsed line is one element.
    """
    data_list = []
    try:
        with open(data_path, 'r', encoding=encoding) as f:
            # Whole file is a single JSON document: kept as one element,
            # matching the original contract.
            data_list.append(json.load(f))
    except json.decoder.JSONDecodeError:
        with open(data_path, 'r', encoding=encoding) as f:
            for line in f:
                # BUGFIX: the original called line.strip() without using
                # the result, then indexed line[-1] -- an IndexError on
                # blank lines.  Strip properly and skip empty lines.
                line = line.strip()
                if not line:
                    continue
                if line[-1] == '}':
                    data_list.append(json.loads(line))
    return data_list
122
+
123
+
124
def tensor2array(x_tensor):
    """Convert a torch tensor to a numpy array (detached, moved to CPU)."""
    return x_tensor.detach().cpu().numpy()
127
+
128
+
129
def save_tensor(x_tensor, mode):
    """Dump the first item of a batched tensor to CSV for debugging.

    mode 1: treats x_tensor[0] as a 2-D matrix, written to ./temp.csv.
    mode 2: treats x_tensor[0] as a 3-D volume, writing one CSV per
            channel (last axis) to ./temp{idx}.csv.
    Values with |v| > 2 are rounded to integers, the rest to 3 decimals,
    to keep the CSV readable.  The DataFrame and the full tensor shape
    are printed after each write.
    """
    x_ary = tensor2array(x_tensor=x_tensor)

    if mode == 1:
        matrix = x_ary[0]
        matrix = np.where(np.absolute(matrix) > 2,
                          np.round(matrix, 0),
                          np.round(matrix, 3))
        frame = pd.DataFrame(matrix)
        frame.to_csv(f'./temp.csv', index=False, encoding='utf-8-sig')
        print(frame)
        print(x_ary.shape)

    if mode == 2:
        volume = x_ary[0]
        dim_i, dim_j, dim_k = volume.shape
        print(dim_i, dim_j, dim_k)
        for idx in range(dim_k):
            channel = np.squeeze(volume[:, :, idx:idx + 1])
            channel = np.where(np.absolute(channel) > 2,
                               np.round(channel, 0),
                               np.round(channel, 3))
            frame = pd.DataFrame(channel)
            frame.to_csv(f'./temp{idx}.csv', index=False, encoding='utf-8-sig')
            print(frame)
            print(x_ary.shape)
152
+
153
+
154
def identify_repeat_section(ary, stan_num, include_nan=False):
    '''
    Find every run where the same value repeats at least *stan_num* times
    and return the start / end indices of those runs.

    parameters
    ----------
    ary: iterable
        sequence to scan; values are compared via their str() form
    stan_num: int
        minimum run length for a section to be reported
    include_nan: bool
        when False (default), repeats of 'nan' values are never counted

    returns
    -------
    (start_idx_list, end_idx_list): parallel lists of inclusive indices.
    '''
    section_starts = []
    section_ends = []
    run_start = 0
    run_length = 1
    for idx, value in enumerate(ary):
        current = str(value)

        # First element only primes the comparison value.
        if idx == 0:
            previous = current
            continue

        if current == previous:
            # Extend the run; 'nan' repeats count only when include_nan.
            if include_nan or current != 'nan':
                run_length += 1
        else:
            # Run ended at idx-1: record it if it was long enough.
            if run_length >= stan_num:
                section_starts.append(run_start)
                section_ends.append(idx - 1)
            # Start tracking a new run from the current position.
            run_start = idx
            run_length = 1
        previous = current

    # Handle a qualifying run that extends to the end of the array.
    if run_length >= stan_num:
        section_starts.append(run_start)
        section_ends.append(idx)
    return section_starts, section_ends
196
+
197
+
198
def identify_stan_repeat_section(ary, stan_value, stan_repeat, mode, reverse=False):
    '''
    Find sections of *ary* where *stan_value* repeats consecutively and
    return the (start, end) index lists of those sections.

    Values are compared via str(value), so stan_value is expected to be a
    string (e.g. 'nan').  mode='above' keeps runs of length >= stan_repeat;
    mode='below' keeps runs of length <= stan_repeat.  With reverse=True
    the complement sections (everything outside the matched runs) are
    returned instead, with each gap's start/end derived from the matched
    runs' boundaries.
    '''
    nan_start_idx = 0
    nan_start_idx_list = []
    nan_end_idx_list = []
    flag = 1
    if len(ary) == 0:
        return [], []
    for idx, value in enumerate(ary):

        value_str = str(value)

        # Very first element: just prime the previous-value tracker.
        if idx == 0:
            pre_value = value_str
            continue

        # Current value matches the target value.
        if value_str == stan_value:
            # Previous value also matched: extend the current run.
            if pre_value == stan_value:
                flag += 1
            # Previous value did not match: a new run starts here.
            else:
                flag = 1
                # Record the run's starting index.
                nan_start_idx = idx

        # Current value does not match the target value.
        else:
            # Previous value matched: a run just ended at idx-1.
            if pre_value == stan_value:
                # Record the run's end if its length passes the mode test.
                if mode == 'above':
                    if flag >= stan_repeat:
                        nan_start_idx_list.append(nan_start_idx)
                        nan_end_idx_list.append(idx-1)
                elif mode == 'below':
                    if flag <= stan_repeat:
                        nan_start_idx_list.append(nan_start_idx)
                        nan_end_idx_list.append(idx-1)
                else:
                    print('mode 를 above 또는 이하 below 중 하나로 지정해주세요')
                    raise KeyError()
        pre_value = value_str

    # Tail handling: a run that reaches the end of the array.
    if value_str == stan_value:
        if mode == 'above':
            if flag >= stan_repeat:
                nan_start_idx_list.append(nan_start_idx)
                nan_end_idx_list.append(idx)
        elif mode == 'below':
            if flag <= stan_repeat:
                nan_start_idx_list.append(nan_start_idx)
                nan_end_idx_list.append(idx)
        else:
            print('mode 를 above 또는 이하 below 중 하나로 지정해주세요')
            raise KeyError()

    if reverse:
        # Build the complement: start from the whole range [0, len-1] and
        # carve out each matched run, shifting the gap boundaries by one.
        rev_start_idx_list = [0]
        rev_end_idx_list = [len(ary)-1]
        for ns_idx, ne_idx in zip(nan_start_idx_list, nan_end_idx_list):
            if ns_idx == 0:
                # Run touches the array start: the first gap begins after it.
                rev_start_idx_list.pop(0)
                rev_start_idx_list.append(ne_idx+1)
                continue
            if ne_idx == len(ary)-1:
                # Run touches the array end: the last gap ends before it.
                rev_end_idx_list.pop(-1)
                rev_end_idx_list.append(ns_idx-1)
                continue
            # Interior run: open a gap after it, close the one before it.
            rev_start_idx_list.append(ne_idx+1)
            rev_end_idx_list.insert(-1, ns_idx-1)
        return rev_start_idx_list, rev_end_idx_list

    return nan_start_idx_list, nan_end_idx_list
@@ -0,0 +1,27 @@
1
+ Metadata-Version: 2.4
2
+ Name: utilskit
3
+ Version: 0.1.0
4
+ Summary: description
5
+ Author: Kimyh
6
+ Author-email: kim_yh663927@naver.com
7
+ Classifier: Programming Language :: Python :: 3
8
+ Classifier: License :: OSI Approved :: MIT License
9
+ Classifier: Operating System :: OS Independent
10
+ Requires-Python: >=3.10
11
+ Description-Content-Type: text/markdown
12
+ Requires-Dist: matplotlib==3.10.3
13
+ Requires-Dist: numpy==2.2.6
14
+ Requires-Dist: pandas==2.3.1
15
+ Requires-Dist: PyMySQL==1.1.1
16
+ Requires-Dist: SQLAlchemy==2.0.41
17
+ Requires-Dist: tqdm==4.67.1
18
+ Dynamic: author
19
+ Dynamic: author-email
20
+ Dynamic: classifier
21
+ Dynamic: description
22
+ Dynamic: description-content-type
23
+ Dynamic: requires-dist
24
+ Dynamic: requires-python
25
+ Dynamic: summary
26
+
27
+ baseline
@@ -0,0 +1,12 @@
1
+ utilskit/__init__.py,sha256=RdcxmxKmxbWEgIFBG8hTg-6NJE4_ylNnavxpWrPICFI,176
2
+ utilskit/classificationutils.py,sha256=QTOYCKRoveb2rqB06Lj26VLujgt_I2kJJdhuk7QEVLA,4320
3
+ utilskit/dataframeutils.py,sha256=C1DZn7JfRC7D7vhuTCttOAVfX5WKt9Kn8S0yF9agArE,9303
4
+ utilskit/dbutils.py,sha256=NGb6DDRpWmmg25fAkVQxYp76zg61Iykj9pmglZp01Bo,3993
5
+ utilskit/logutils.py,sha256=SVlXImmuEHTG6i6Qe95OIQbmCicZdL0kX03CBiQU7xI,3302
6
+ utilskit/plotutils.py,sha256=sCb8j32ieQvblZksoerzIor91y_FYwAdC0-Zugfs5Pk,8052
7
+ utilskit/timeutils.py,sha256=l2VFqlUG_Bbyyr6rmWiFGf9b2KF6i65MjM7Dhs2tiBg,1257
8
+ utilskit/utils.py,sha256=4UkDhs2PDaxN17MiQ8TXtviy_cV6l8Z-YUt6jeZ3ueo,7880
9
+ utilskit-0.1.0.dist-info/METADATA,sha256=-9P8nqc5xzWqBfUw0ckN4nTEjhEEMDuYXGFWnsy9U7w,705
10
+ utilskit-0.1.0.dist-info/WHEEL,sha256=lTU6B6eIfYoiQJTZNc-fyaR6BpL6ehTzU3xGYxn2n8k,91
11
+ utilskit-0.1.0.dist-info/top_level.txt,sha256=bi7zXh9RMItInj0Kdx2-Owt3AFtUHm__qZ40kPbCukg,9
12
+ utilskit-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (78.1.1)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1 @@
1
+ utilskit