utilskit 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- utilskit-0.1.0/MANIFEST.in +0 -0
- utilskit-0.1.0/PKG-INFO +27 -0
- utilskit-0.1.0/README.md +1 -0
- utilskit-0.1.0/setup.cfg +4 -0
- utilskit-0.1.0/setup.py +33 -0
- utilskit-0.1.0/utilskit/__init__.py +7 -0
- utilskit-0.1.0/utilskit/classificationutils.py +143 -0
- utilskit-0.1.0/utilskit/dataframeutils.py +273 -0
- utilskit-0.1.0/utilskit/dbutils.py +159 -0
- utilskit-0.1.0/utilskit/logutils.py +109 -0
- utilskit-0.1.0/utilskit/plotutils.py +278 -0
- utilskit-0.1.0/utilskit/timeutils.py +40 -0
- utilskit-0.1.0/utilskit/utils.py +277 -0
- utilskit-0.1.0/utilskit.egg-info/PKG-INFO +27 -0
- utilskit-0.1.0/utilskit.egg-info/SOURCES.txt +16 -0
- utilskit-0.1.0/utilskit.egg-info/dependency_links.txt +1 -0
- utilskit-0.1.0/utilskit.egg-info/requires.txt +6 -0
- utilskit-0.1.0/utilskit.egg-info/top_level.txt +1 -0
|
File without changes
|
utilskit-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: utilskit
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: description
|
|
5
|
+
Author: Kimyh
|
|
6
|
+
Author-email: kim_yh663927@naver.com
|
|
7
|
+
Classifier: Programming Language :: Python :: 3
|
|
8
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
9
|
+
Classifier: Operating System :: OS Independent
|
|
10
|
+
Requires-Python: >=3.10
|
|
11
|
+
Description-Content-Type: text/markdown
|
|
12
|
+
Requires-Dist: matplotlib==3.10.3
|
|
13
|
+
Requires-Dist: numpy==2.2.6
|
|
14
|
+
Requires-Dist: pandas==2.3.1
|
|
15
|
+
Requires-Dist: PyMySQL==1.1.1
|
|
16
|
+
Requires-Dist: SQLAlchemy==2.0.41
|
|
17
|
+
Requires-Dist: tqdm==4.67.1
|
|
18
|
+
Dynamic: author
|
|
19
|
+
Dynamic: author-email
|
|
20
|
+
Dynamic: classifier
|
|
21
|
+
Dynamic: description
|
|
22
|
+
Dynamic: description-content-type
|
|
23
|
+
Dynamic: requires-dist
|
|
24
|
+
Dynamic: requires-python
|
|
25
|
+
Dynamic: summary
|
|
26
|
+
|
|
27
|
+
baseline
|
utilskit-0.1.0/README.md
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
baseline
|
utilskit-0.1.0/setup.cfg
ADDED
utilskit-0.1.0/setup.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
from setuptools import setup, find_packages

# Read the long description once, with an explicit encoding and a context
# manager (the old bare open("README.md").read() leaked the file handle and
# depended on the platform default encoding).
with open("README.md", encoding="utf-8") as readme_file:
    long_description = readme_file.read()

setup(
    name="utilskit",                  # distribution name used by pip install
    version="0.1.0",
    packages=find_packages(),         # include every package found in the tree
    include_package_data=True,        # ship data files declared below / in MANIFEST.in
    package_data={
    },
    install_requires=[                # pinned runtime dependencies
        "matplotlib==3.10.3",
        "numpy==2.2.6",
        "pandas==2.3.1",
        "PyMySQL==1.1.1",
        "SQLAlchemy==2.0.41",
        "tqdm==4.67.1"
    ],
    author="Kimyh",
    author_email="kim_yh663927@naver.com",
    description="description",
    long_description=long_description,
    long_description_content_type="text/markdown",
    # url="https://github.com/Kim-YoonHyun/my_package",
    classifiers=[                     # package metadata for PyPI
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.10",         # minimum supported Python version
)
|
|
@@ -0,0 +1,143 @@
|
|
|
1
|
+
'''
|
|
2
|
+
pip install pandas
|
|
3
|
+
'''
|
|
4
|
+
import sys
|
|
5
|
+
import pandas as pd
|
|
6
|
+
import numpy as np
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def get_max_2nd_n_reliability(pred):
    """Return the runner-up class index per row plus a reliability score.

    Each row of ``pred`` is min-max normalised to [0, 1]; the top-1 entry
    (exactly 1.0 after scaling) is masked out, and the argmax of the rest
    is the second-best class.  Reliability is ``(1 - runner_up) * 100``:
    the further the runner-up trails the winner, the higher the score.
    """
    # Min-max normalise every row.
    row_min = np.expand_dims(np.min(pred, axis=1), axis=1)
    scaled = pred - row_min
    row_max = np.expand_dims(np.max(scaled, axis=1), axis=1)
    scaled = scaled / row_max

    # Mask the top-1 prediction so the next argmax finds the runner-up.
    scaled = np.where(scaled == 1, -100, scaled)

    # Second-ranked class per row.
    max_2nd_index = np.argmax(scaled, axis=1)

    # Gap between winner and runner-up, expressed as a percentage.
    pred_reliability = (1 - np.max(scaled, axis=1)) * 100
    return max_2nd_index, pred_reliability
|
|
26
|
+
def matrix2confusion(matrix, uni_label_list, round_num=4, show_percentage=True):
    """Turn a raw count matrix into an annotated confusion-matrix DataFrame.

    Parameters
    ----------
    matrix : list[list[int]]
        Square list-of-lists of counts indexed ``matrix[true][pred]``.
        NOTE: the rows are extended in place with the metric columns.
    uni_label_list : list
        Class labels in the same order as the matrix axes.
    round_num : int
        Decimal places used when rounding the metrics.
    show_percentage : bool
        When True, accuracy/precision/recall are scaled to percentages.

    Returns
    -------
    pandas.DataFrame
        Counts plus 'accuracy', 'precision', 'recall', 'f1 score' and
        'count' columns, and a trailing 'count' row of prediction totals.
    """
    whole_sum = np.sum(matrix)
    true_sum_list = np.sum(matrix, axis=-1).tolist()   # row totals (per true label)
    pred_sum_list = np.sum(matrix, axis=-2).tolist()   # column totals (per predicted label)

    per_num = 100 if show_percentage else 1

    correct_sum = 0
    for i in range(len(matrix)):
        correct_count = matrix[i][i]   # diagonal = correctly classified
        correct_sum += correct_count
        pred_sum = pred_sum_list[i]
        true_sum = true_sum_list[i]

        # precision: correct / predicted-as-class-i (None when never predicted)
        try:
            precision = correct_count / pred_sum
            precision = np.round(precision, round_num) * per_num
        except ZeroDivisionError:
            precision = None

        # recall: correct / actually-class-i (None when class never occurs)
        try:
            recall = correct_count / true_sum
            recall = np.round(recall, round_num) * per_num
        except ZeroDivisionError:
            recall = None

        # f1: harmonic mean; TypeError covers a None precision/recall
        try:
            f1_score = 2 * precision * recall / (precision + recall)
            f1_score = np.round(f1_score, round_num)
        except TypeError:
            f1_score = None

        matrix[i].extend([None, precision, recall, f1_score, true_sum])

    whole_accuracy = correct_sum / whole_sum
    whole_accuracy = np.round(whole_accuracy, round_num) * per_num

    # index & column labels
    index_list = uni_label_list.copy()
    index_list.append('count')
    column_list = uni_label_list.copy()
    column_list.extend(['accuracy', 'precision', 'recall', 'f1 score', 'count'])

    # trailing row holding the per-column prediction totals
    pred_count = pred_sum_list + [None] * (len(column_list) - len(index_list))
    matrix.append(pred_count)

    confusion_matrix = pd.DataFrame(matrix, index=index_list, columns=column_list)
    # Overall accuracy lives in the first row of the 'accuracy' column.
    # Fix: use .iloc instead of the old chained assignment
    # confusion_matrix['accuracy'][0] = ..., which does not write back under
    # pandas copy-on-write and is removed in pandas 3.
    confusion_matrix.iloc[0, confusion_matrix.columns.get_loc('accuracy')] = whole_accuracy

    return confusion_matrix
|
|
86
|
+
def make_confusion_matrix(mode, true_list, pred_list, ignore_idx=None, round_num=4, label2id_dict=None, id2label_dict=None, show_percentage=True):
    """Count true/pred pairs into a confusion-matrix DataFrame.

    Parameters
    ----------
    mode : str
        'label2id' when true_list/pred_list hold label values keyed in
        label2id_dict; 'id2label' when they hold integer ids whose display
        names come from id2label_dict.
    ignore_idx : int, optional
        In 'id2label' mode, pairs whose true id equals this value
        (e.g. padding positions) are skipped.
    round_num, show_percentage :
        Forwarded to matrix2confusion().
    """
    if mode == 'label2id':
        uni_label_list = list(label2id_dict.keys())
    elif mode == 'id2label':
        uni_label_list = list(id2label_dict.values())
    else:
        # The old code fell through here and died later with a NameError on
        # uni_label_list; fail fast with a clear message instead.
        raise ValueError(f"mode must be 'label2id' or 'id2label', got {mode!r}")

    # square zero matrix, indexed matrix[true][pred]
    n_labels = len(uni_label_list)
    matrix = [[0] * n_labels for _ in range(n_labels)]

    # tally the pairs
    if mode == 'label2id':
        for t, p in zip(true_list, pred_list):
            matrix[label2id_dict[t]][label2id_dict[p]] += 1
    elif mode == 'id2label':
        for t_i, p_i in zip(true_list, pred_list):
            # skip ignored ids such as padding
            if (t_i is not None) and (t_i == ignore_idx):
                continue
            matrix[int(t_i)][int(p_i)] += 1

    confusion_matrix = matrix2confusion(
        matrix=matrix,
        uni_label_list=uni_label_list,
        round_num=round_num,
        show_percentage=show_percentage
    )

    return confusion_matrix
|
|
125
|
+
def reset_confusion_matrix(confusion_matrix, new_label_list, round_num=4, show_percentage=True):
    """Rebuild a confusion matrix restricted to ``new_label_list``.

    Selects the sub-matrix for the requested labels on both axes (dropping
    the metric columns and the trailing 'count' row) and recomputes the
    metrics via matrix2confusion().  Exits the process when an unknown
    label is requested.
    """
    try:
        # column selection also validates that every requested label exists
        matrix_df = confusion_matrix[new_label_list]
    except KeyError:
        # Korean message: "a label name that does not exist in the
        # prediction results was entered" — then hard exit.
        print('예측 결과에 존재하지 않는 라벨명을 입력하였습니다.')
        sys.exit()

    # Transpose, select the same labels again, transpose back: this filters
    # the rows by label, discarding 'accuracy'/'precision'/... columns and
    # the 'count' row.
    matrix_df = matrix_df.T[new_label_list]
    matrix_df = matrix_df.T
    matrix = matrix_df.values.tolist()

    new_confusion_matrix = matrix2confusion(
        matrix=matrix,
        uni_label_list=new_label_list,
        round_num=round_num,
        show_percentage=show_percentage
    )

    return new_confusion_matrix
|
@@ -0,0 +1,273 @@
|
|
|
1
|
+
import sys
|
|
2
|
+
import os
|
|
3
|
+
from datetime import datetime, timedelta
|
|
4
|
+
import numpy as np
|
|
5
|
+
import pandas as pd
|
|
6
|
+
import csv
|
|
7
|
+
import warnings
|
|
8
|
+
warnings.filterwarnings('ignore')
|
|
9
|
+
|
|
10
|
+
from utilskit import utils as u
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def read_df(path):
    """Load a table from ``path`` into a DataFrame, dispatching on extension.

    Supported extensions: csv/CSV (utf-8-sig with a cp949 fallback, common
    for Korean-locale files), xlsx/xls, and txt (one line per row in a
    'string' column).

    Raises
    ------
    ValueError
        For any other extension (message kept in Korean, as before).
    UnicodeDecodeError
        When neither utf-8-sig nor cp949 can decode a CSV file.
    """
    extention = path.split('.')[-1]
    if extention in ['csv', 'CSV']:
        switch = 'csv'
    elif extention in ['xlsx', 'xls']:
        switch = 'excel'
    elif extention in ['txt']:
        switch = 'txt'
    else:
        raise ValueError(f'{extention}은(는) 잘못되거나 지정되지 않은 확장자입니다.')

    if switch == 'csv':
        data_df = None
        # A finite encoding list replaces the old `while True` retry, which
        # re-assigned 'cp949' forever (infinite loop) when cp949 also failed
        # to decode; now the final UnicodeDecodeError propagates.
        for encoding in ('utf-8-sig', 'cp949'):
            try:
                data_df = pd.read_csv(path, encoding=encoding)
                break
            except UnicodeDecodeError:
                if encoding == 'cp949':
                    raise
            except pd.errors.ParserError:
                # Ragged rows: fall back to the csv module and promote the
                # first row to the header.  (The old version leaked the file
                # handle here; `with` closes it.)
                with open(path, encoding=encoding) as f:
                    csv_list = list(csv.reader(f))
                data_df = pd.DataFrame(csv_list)
                data_df.columns = data_df.iloc[0].to_list()
                data_df = data_df.drop(index=data_df.index[0])  # drop header row
                break
    elif switch == 'excel':
        data_df = pd.read_excel(path)
    elif switch == 'txt':
        line_list = []
        with open(path, 'r', encoding='utf-8-sig') as f:
            for line in f.readlines():
                line_list.append(line.replace('\n', ''))
        data_df = pd.DataFrame(line_list, columns=['string'])
    return data_df
56
|
+
|
|
57
|
+
def utc2kor(df, time_column='time'):
    """Convert ISO 'T'/'Z' UTC timestamps to KST (+9h) strings and sort.

    The time column is rewritten in place as '%Y-%m-%d %H:%M:%S' strings
    shifted nine hours forward; the frame is returned sorted ascending on
    that column.  An empty frame is returned untouched.
    """
    if df.empty:
        return df

    def _to_kst(raw):
        # strip the ISO 'T' separator and trailing 'Z', then shift +9h
        cleaned = str(raw).replace('T', ' ').replace('Z', '')
        shifted = datetime.strptime(cleaned, '%Y-%m-%d %H:%M:%S') + timedelta(hours=9)
        return str(shifted)

    df[time_column] = df[time_column].astype('str').apply(_to_kst)

    return df.sort_values(by=time_column, ascending=True)
|
|
74
|
+
# def dataframe_preprocessor(df,
|
|
75
|
+
# max_dict=None, min_dict=None,
|
|
76
|
+
# nan_drop_column=None,
|
|
77
|
+
# do_nan_fill_whole=False
|
|
78
|
+
# ):
|
|
79
|
+
# # ========================================================================
|
|
80
|
+
# # # 최대값 초과 --> 이상치 --> 결측치
|
|
81
|
+
# # if max_dict is not None:
|
|
82
|
+
# # df = maxadnormal2nan(df=df, max_dict=max_dict)
|
|
83
|
+
|
|
84
|
+
# # # 최소값 미만 --> 이상치 --> 결측치
|
|
85
|
+
# # if min_dict is not None:
|
|
86
|
+
# # df = minadnormal2nan(df=df, min_dict=min_dict)
|
|
87
|
+
|
|
88
|
+
# # ========================================================================
|
|
89
|
+
# # 특정 컬럼 기준 NaN 제거
|
|
90
|
+
# # if nan_drop_column is not None:
|
|
91
|
+
# # df = drop_nan(
|
|
92
|
+
# # df=df,
|
|
93
|
+
# # base_column=nan_drop_column
|
|
94
|
+
# # )
|
|
95
|
+
|
|
96
|
+
# # ========================================================================
|
|
97
|
+
# # 전후값 채우기
|
|
98
|
+
# if do_nan_fill_whole:
|
|
99
|
+
# df = df.fillna(method='ffill')
|
|
100
|
+
# df = df.fillna(method='bfill')
|
|
101
|
+
|
|
102
|
+
# return df
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def adnormal2nan(df, stan_col, max_value=None, min_value=None):
    """Replace out-of-range values in ``df[stan_col]`` with NaN.

    Values strictly above ``max_value`` or strictly below ``min_value``
    are treated as outliers and set to NaN.  The frame is modified in
    place and also returned.
    """
    # Fix: .loc replaces the old chained assignment df[col][mask] = ...,
    # which does not write back to df under pandas copy-on-write.
    if max_value is not None:
        df.loc[df[stan_col] > max_value, stan_col] = np.nan
    if min_value is not None:
        df.loc[df[stan_col] < min_value, stan_col] = np.nan
    return df
|
|
113
|
+
def time_filling(df, start, end, time_column='time'):
    """Right-merge ``df`` onto a one-second time grid from start to end.

    Seconds missing from ``df`` appear as rows with NaN values, making
    gaps in the original data explicit.  An empty frame is returned
    untouched.  Assumes ``df[time_column]`` holds '%Y-%m-%d %H:%M:%S'
    strings so the merge keys match the stringified grid.
    """
    if df.empty:
        return df

    # 's' (one row per second) — the uppercase 'S' alias is deprecated
    # since pandas 2.2 and removed in pandas 3.
    time_range = pd.date_range(start=start, end=end, freq='s')
    time_range_df = pd.DataFrame(time_range, columns=[time_column])
    # stringify so the merge key matches df's string timestamps
    time_range_df = time_range_df.astype('str')

    # right-merge: keep every grid second, attach data where it exists
    df = pd.merge(df, time_range_df, how='right')
    return df
|
|
126
|
+
def drop_nan(df, stan_col):
    """Drop rows where ``stan_col`` is NaN; a missing column is a no-op."""
    try:
        return df.dropna(subset=[stan_col])
    except KeyError:
        # column not present — return the frame unchanged
        return df
|
|
134
|
+
def isdfvalid(df, valid_column_list):
    """Return True when every column in ``valid_column_list`` exists in df."""
    try:
        # label-based selection raises KeyError on any missing column
        df.loc[:, valid_column_list]
        return True
    except KeyError:
        return False
|
|
143
|
+
def local_nan_correction(df, stan_col, nan_repeat=5):
    """Fill short NaN runs in ``df[stan_col]`` by forward/backward fill.

    Only NaN sections of length at most ``nan_repeat`` — as located by
    utils.identify_stan_repeat_section — are corrected; longer gaps stay
    untouched.  The frame is modified in place and returned.
    """
    stan_ary = df[stan_col].values
    nan_start_idx_list, nan_end_idx_list = u.identify_stan_repeat_section(
        ary=stan_ary,
        stan_value='nan',
        stan_repeat=nan_repeat,
        mode='below',
        reverse=False
    )
    for nan_si, nan_ei in zip(nan_start_idx_list, nan_end_idx_list):
        # Include one valid neighbour on each side so the fill has a source
        # value.  .ffill()/.bfill() replace fillna(method=...), which is
        # deprecated since pandas 2.1 and removed in pandas 3.
        df.loc[nan_si-1:nan_ei, stan_col] = df.loc[nan_si-1:nan_ei, stan_col].ffill()
        df.loc[nan_si:nan_ei+1, stan_col] = df.loc[nan_si:nan_ei+1, stan_col].bfill()

    return df
|
|
163
|
+
def pin_nan_correction(df, stan_col, max_diff=0.1, nan_repeat=3):
    '''
    Detect short "pin" spikes — values that are not outside any absolute
    range but jump away from the local data flow — and smooth them out by
    turning a small window around each spike into NaN and then filling
    forward/backward.

    Example: 20, 20, 20, 20, [ 1], 20, 20, 20, 1, 1, 2, 1
    Result:  20, 20, 20, 20, [20], 20, 20, 20, 1, 1, 2, 1
    (the lone 1 is a pin; the trailing run of 1s is a genuine level shift)

    Parameters:
        df: frame to correct in place (also returned)
        stan_col: column to inspect
        max_diff: threshold on the step size and on the step-minus-trend
            difference used to flag a pin
        nan_repeat: forwarded to identify_stan_repeat_section as the
            maximum NaN-run length to fill
    '''

    # reference column as ndarray
    stan_ary = df[stan_col].values

    # Build an array shifted right by one (each position holds the previous
    # value), then take the absolute per-step difference.
    stan_1_list = stan_ary.tolist()
    stan_1_list.insert(0, stan_ary[0])
    stan_1_ary = np.array(stan_1_list)[:-1]
    diff_ary = np.round(stan_ary - stan_1_ary, 4)
    diff_ary = np.array(list(map(abs, diff_ary)))

    #==
    idx_list = []
    for idx, diff in enumerate(diff_ary):

        # step smaller than the threshold: not a candidate
        if diff < max_diff:
            continue

        # mean of the 10 values before idx
        # NOTE(review): for idx < 10 this slice is shorter than 10 — and for
        # idx == 0, stan_ary[-10:0] is empty, making the mean NaN; confirm
        # that is the intended behaviour.
        before_aver = np.average(stan_ary[idx-10:idx])

        # mean of the 10 values after idx
        after_aver = np.average(stan_ary[idx+1:idx+11])

        # if either window contains NaN, mirror the other side's mean
        if str(before_aver) == 'nan':
            before_aver = after_aver
        if str(after_aver) == 'nan':
            after_aver = before_aver

        # absolute difference between the two window means (local trend)
        aver_diff = abs(after_aver - before_aver)

        # p = immediate step minus trend change: large p means the jump is
        # not explained by a level shift, i.e. a pin
        p = np.round(diff - aver_diff, 4)

        # flag as a pin when p exceeds the threshold
        if p > max_diff:
            idx_list.append(idx)

    # NOTE(review): raises NameError when diff_ary is empty (idx never
    # bound) — confirm callers never pass an empty column.
    del idx
    #==

    # Blank a +/-3 window around every pin in a scratch copy so consecutive
    # pins merge into one NaN section.
    temp_ary = stan_ary.copy()
    if len(idx_list) > 0:
        for idx in idx_list:
            if idx < 3:
                temp_ary[:idx+3] = np.nan
            else:
                temp_ary[idx-3:idx+3] = np.nan

    # locate the NaN sections (at most nan_repeat long) in the scratch copy
    for_fill_start_idx_list, for_fill_end_idx_list = u.identify_stan_repeat_section(
        ary=temp_ary,
        stan_value='nan',
        stan_repeat=nan_repeat,
        mode='below',
        reverse=False
    )

    # Blank each section in the real frame, then fill from the neighbours
    # (one extra row on each side supplies the fill source).
    for fsi, fei in zip(for_fill_start_idx_list, for_fill_end_idx_list):
        df.loc[fsi:fei, stan_col] = np.nan
        df.loc[fsi-1:fei, stan_col] = df.loc[fsi-1:fei, stan_col].fillna(method='ffill')
        df.loc[fsi:fei+1, stan_col] = df.loc[fsi:fei+1, stan_col].fillna(method='bfill')

    return df
|
|
@@ -0,0 +1,159 @@
|
|
|
1
|
+
# DB
|
|
2
|
+
import pandas as pd
|
|
3
|
+
import pymysql
|
|
4
|
+
from sqlalchemy import create_engine
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
# def get_info():
|
|
8
|
+
# db_host = '192.168.0.85'
|
|
9
|
+
# db_port = 3306
|
|
10
|
+
# db_user = 'theimc'
|
|
11
|
+
# db_passward = 'theimc#10!'
|
|
12
|
+
# db_name = 'BUSMONITORING'
|
|
13
|
+
# charset = 'utf8mb4'
|
|
14
|
+
# if_exists = 'append'
|
|
15
|
+
# autocommit = True
|
|
16
|
+
# return (db_host, db_port, db_user, db_passward, db_name, charset, if_exists, autocommit)
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
# def get_info_p5000():
|
|
20
|
+
# db_host = "59.25.131.135"
|
|
21
|
+
# db_port = 3306
|
|
22
|
+
# db_user = "ai_m"
|
|
23
|
+
# db_password = "temp"
|
|
24
|
+
# db_name = "bus"
|
|
25
|
+
# charset = 'utf8mb4'
|
|
26
|
+
# if_exists = 'append'
|
|
27
|
+
# autocommit = True
|
|
28
|
+
# return (db_host, db_port, db_user, db_password, db_name, charset, if_exists ,autocommit)
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def db_connect(host, user, port, passward, name, charset='utf8mb4', if_exists='append', autocommit=True):
    """Open and return a pymysql connection to database ``name``.

    Note: 'passward' (sic, kept for caller compatibility) is the
    connection password.  'if_exists' is accepted but unused here —
    presumably intended for the DataFrame.to_sql path in pd2db(); TODO
    confirm and consider removing it from this signature.
    """
    conn = pymysql.connect(
        host=host,
        user=user,
        port=port,
        password=passward,
        db=name,
        charset=charset,
        autocommit=autocommit
    )
    return conn
+
|
|
52
|
+
def select_db(conn, query, where=None):
    """Execute ``query`` on ``conn`` and return every fetched row.

    ``where`` is accepted for signature compatibility but unused — embed
    any WHERE clause in ``query`` itself.
    """
    cursor = conn.cursor()
    try:
        cursor.execute(query)
        info = cursor.fetchall()
    finally:
        # close even when execute() raises (the old version leaked the
        # cursor on error); also dropped the no-op `query = query` line
        cursor.close()
    return info
|
|
61
|
+
def delete_db(db_info_dict, table, where=None):
    """Delete rows from ``table``; all rows when ``where`` is None.

    ``db_info_dict`` must provide 'host', 'user', 'port', 'passward' and
    'name' (optionally 'charset' and 'autocommit').

    Bug fix: the old code called db_connect(db_info_dict), passing the
    whole dict as the positional ``host`` argument — it could never
    connect.  The dict is now unpacked into the named parameters.

    NOTE(review): ``table`` and ``where`` are interpolated directly into
    the SQL string — never pass untrusted input here.
    """
    conn = db_connect(
        host=db_info_dict['host'],
        user=db_info_dict['user'],
        port=db_info_dict['port'],
        passward=db_info_dict['passward'],
        name=db_info_dict['name'],
        charset=db_info_dict.get('charset', 'utf8mb4'),
        autocommit=db_info_dict.get('autocommit', True)
    )
    cursor = conn.cursor()
    try:
        if where:
            query = f"""
            DELETE FROM {table}
            where {where}
            """
        else:
            query = f"""
            DELETE FROM {table}
            """
        cursor.execute(query)
    finally:
        cursor.close()
+
|
|
76
|
+
def update_db(db_info_dict, table, set_, where):
    """Run ``UPDATE {table} SET {set_} WHERE {where}``.

    Bug fix: the old code called db_connect(db_info_dict), passing the
    whole dict as the positional ``host`` argument — it could never
    connect.  The dict is now unpacked into the named parameters.

    NOTE(review): ``table``/``set_``/``where`` are interpolated directly
    into the SQL string — never pass untrusted input here.
    """
    conn = db_connect(
        host=db_info_dict['host'],
        user=db_info_dict['user'],
        port=db_info_dict['port'],
        passward=db_info_dict['passward'],
        name=db_info_dict['name'],
        charset=db_info_dict.get('charset', 'utf8mb4'),
        autocommit=db_info_dict.get('autocommit', True)
    )
    cursor = conn.cursor()
    try:
        query = f"""
        update {table}
        set {set_}
        where {where}
        """
        cursor.execute(query)
    finally:
        cursor.close()
|
|
88
|
+
def pd2db(db_host, db_port, db_user, db_passward,
          db_name, charset, if_exists, autocommit,
          df, table, encoding='utf-8-sig', index=False):
    """Write DataFrame ``df`` into MySQL table ``table`` via SQLAlchemy.

    ``encoding`` and ``autocommit`` are accepted for caller compatibility
    but unused by the current SQLAlchemy API.

    Fixes: the old version opened a separate connection it never used
    (to_sql was given the engine) and never disposed the engine; the
    unused connection is gone and the engine's pool is now released even
    when to_sql raises.
    """
    url = f"mysql+pymysql://{db_user}:{db_passward}@{db_host}:{db_port}/{db_name}?charset={charset}"
    engine = create_engine(url)
    try:
        df.to_sql(name=table, con=engine, if_exists=if_exists, index=index)
    finally:
        engine.dispose()
|
|
99
|
+
|
|
100
|
+
# def pd2db(db_info_dict, df, table, encoding='utf-8-sig', index=False):
|
|
101
|
+
# db_host = db_info_dict['host']
|
|
102
|
+
# db_port = db_info_dict['port']
|
|
103
|
+
# db_user = db_info_dict['user']
|
|
104
|
+
# db_passward = db_info_dict['passward']
|
|
105
|
+
# db_name = db_info_dict['name']
|
|
106
|
+
# charset = db_info_dict['charset']
|
|
107
|
+
# if_exists = db_info_dict['if_exists']
|
|
108
|
+
# autocommit = db_info_dict['autocommit']
|
|
109
|
+
# url = f"mysql+pymysql://{db_user}:{db_passward}@{db_host}:{db_port}/{db_name}?charset={charset}"
|
|
110
|
+
# # engine = create_engine(url, encoding=encoding)
|
|
111
|
+
# engine = create_engine(url)
|
|
112
|
+
# conn = engine.connect()
|
|
113
|
+
# df.to_sql(name=table, con=engine, if_exists=if_exists, index=index)
|
|
114
|
+
# conn.close()
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
"""
|
|
118
|
+
def main():
|
|
119
|
+
db_host = '211.195.9.226'
|
|
120
|
+
db_port = 3306
|
|
121
|
+
db_user = 'root'
|
|
122
|
+
db_passward = 'theimc#10!'
|
|
123
|
+
db_name = 'flagship'
|
|
124
|
+
charset = 'utf8mb4'
|
|
125
|
+
table = 'test'
|
|
126
|
+
if_exists = 'append'
|
|
127
|
+
autocommit = True
|
|
128
|
+
|
|
129
|
+
df = pd.read_csv('D:/python/project/temp/example.csv', encoding='utf-8-sig')
|
|
130
|
+
|
|
131
|
+
pd2db(
|
|
132
|
+
db_user=db_user,
|
|
133
|
+
db_passward=db_passward,
|
|
134
|
+
db_host=db_host,
|
|
135
|
+
db_port=db_port,
|
|
136
|
+
db_name=db_name,
|
|
137
|
+
charset=charset,
|
|
138
|
+
df=df,
|
|
139
|
+
table=table,
|
|
140
|
+
if_exists=if_exists,
|
|
141
|
+
encoding='utf-8-sig',
|
|
142
|
+
index=False
|
|
143
|
+
)
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
if __name__ == '__main__':
|
|
147
|
+
main()
|
|
148
|
+
|
|
149
|
+
"""
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
|
|
157
|
+
|
|
158
|
+
|
|
159
|
+
|