upplib 2.9.9__py3-none-any.whl → 3.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
upplib/__init__.py CHANGED
@@ -8,6 +8,7 @@ from upplib.util import *
 
  # Package for file-operation helpers
  from upplib.file import *
+ from upplib.file_text import *
 
  # Package for chart-related HTML code
  from upplib.chart_html import *
@@ -31,3 +32,4 @@ from upplib.multi_thread import *
 
  from upplib.query_log import *
  from upplib.redis_tool import *
+ from upplib.format_data import *
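
The two added star-imports mean that, once 3.2.2 is installed, whatever upplib.file_text and upplib.format_data export is also pulled in by "from upplib import *". A minimal sketch that only checks the wiring, without assuming anything about the contents of the two new modules (which this diff does not show):

import upplib

# Importing the package runs __init__.py, which now imports both new
# submodules; the import machinery binds them as attributes of the package.
for name in ("file_text", "format_data"):
    print(name, hasattr(upplib, name))
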
upplib/clean_up_msg.py CHANGED
@@ -3,7 +3,31 @@ from datetime import datetime, timezone, timedelta
  from typing import Any, Optional, Union
 
 
- def clean_up_msg(msg: str = None, clean_up_type: int = 1, trace_id_fixed_length: int = None) -> str | None:
+ def simplify_msg(msg: str = None,
+                  only_date_msg: bool = False,
+                  msg_delete_prefix: int = 0,
+                  ) -> str | None:
+     """
+     only_date_msg: output only the date + message
+     msg_delete_prefix: number of leading characters to drop from the message
+     """
+     res_msg = msg
+     if only_date_msg:
+         # Keep only the date + message parts; drop everything else
+         msg_date = res_msg.partition(' ')[0]
+         msg_1 = res_msg.split(' - ')[2]
+         res_msg = msg_date.strip() + ' ' + msg_1.strip()
+     if msg_delete_prefix > 0:
+         msg_date, _, msg_1 = res_msg.partition(' ')
+         res_msg = msg_date.strip() + ' ' + msg_1.strip()[msg_delete_prefix:]
+     return res_msg
+
+
+ def clean_up_msg(msg: str = None,
+                  clean_up_type: int = 1,
+                  trace_id_fixed_length: int = None,
+                  method_length: int = 31
+                  ) -> str | None:
      if msg is None:
          return None
      formatters: list[Callable[[str], Optional[str]]] = [
@@ -17,7 +41,7 @@ def clean_up_msg(msg: str = None, clean_up_type: int = 1, trace_id_fixed_length:
          i + 1: formatter for i, formatter in enumerate(formatters)
      }
      if clean_up_type in formatter_map:
-         return formatter_map[clean_up_type](msg, trace_id_fixed_length)
+         return formatter_map[clean_up_type](msg, trace_id_fixed_length, method_length)
      return msg
 
 
@@ -39,7 +63,7 @@ def get_thread_id_for_log(thread_id_str: str = None) -> str:
      return f' -{suffix}- '
 
 
- def clean_up_msg_1(msg: str = None, trace_id_fixed_length: int = None) -> str:
+ def clean_up_msg_1(msg: str = None, trace_id_fixed_length: int = None, method_length: int = 31) -> str:
      try:
          """
          2025-09-28T19:38:41.146111-06:00 com.leo.digest.aop.ApiLogAspect - traceId: - (catTraceId:rcs-gateway-0a0f2154-488625-102) - ===>API GatewayFacadeImpl#gatewayRequest START
@@ -61,7 +85,7 @@ def clean_up_msg_1(msg: str = None, trace_id_fixed_length: int = None) -> str:
          method, _, other = msg0.strip().partition(' - traceId:')
          other = msg0.strip() if not other else other
          thread_id = get_thread_id_for_log(method.strip().partition('] ')[0].rpartition('-')[2]) if '] ' in method else ' '
-         method = method.strip()[-31:].rjust(31)
+         method = method.strip()[-method_length:].rjust(method_length)
          if '(catTraceId:' in other:
              trace_id = re.search(r'\(catTraceId:([^)]+)\)', other).group(1).strip()
              other_after = other.strip().partition(trace_id)[2][3:].strip()
@@ -81,7 +105,7 @@ def clean_up_msg_1(msg: str = None, trace_id_fixed_length: int = None) -> str:
          return msg
 
 
- def clean_up_msg_2(msg: str = None, trace_id_fixed_length: int = None) -> str:
+ def clean_up_msg_2(msg: str = None, trace_id_fixed_length: int = None, method_length: int = 31) -> str:
      try:
          """
          2025-10-09T13:45:49.687123+08:00 INFO 8 --- [nio-8080-exec-4] c.l.r.b.s.device.impl.DeviceServiceImpl : (catTraceId:customer-product-0a5a0329-488885-107496) - checkDeviceId lock key: 1073852969169211259
@@ -93,7 +117,7 @@ def clean_up_msg_2(msg: str = None, trace_id_fixed_length: int = None) -> str:
          time1, _, msg0 = msg.strip().partition(' ')
          thread_id = get_thread_id_for_log(msg0.strip().rpartition('] ')[0].rpartition('-')[2])
          method, _, other = msg0.strip().rpartition(' : ')
-         method = method.strip()[-31:].rjust(31)
+         method = method.strip()[-method_length:].rjust(method_length)
          trace_id = ''
          if '(catTraceId:' in other:
              trace_id = re.search(r'\(catTraceId:([^)]+)\)', other).group(1)
@@ -105,7 +129,7 @@ def clean_up_msg_2(msg: str = None, trace_id_fixed_length: int = None) -> str:
          return msg
 
 
- def clean_up_msg_3(msg: str = None, trace_id_fixed_length: int = None) -> str:
+ def clean_up_msg_3(msg: str = None, trace_id_fixed_length: int = None, method_length: int = 31) -> str:
      try:
          """
          2025-10-09T14:25:28.096+07:00 INFO com.itn.idn.review.aop.LogAspect - traceId:db57046b7cba9d5c55fa5ff93727c4df - ReviewBackController.queryCreditCasesByUserIdsV2: request log info-------------> {"userIds":[1011450014961537063]}
@@ -114,7 +138,7 @@ def clean_up_msg_3(msg: str = None, trace_id_fixed_length: int = None) -> str:
          SEP_S = ' - traceId:'
          time1, _, msg0 = msg.strip().partition(' ')
          msg1 = msg0.strip().split(SEP_S)
-         method = ' ' + msg1[0].strip()[-31:].rjust(31)
+         method = ' ' + msg1[0].strip()[-method_length:].rjust(method_length)
          trace_id, _, other = msg1[1].strip().partition(' - ')
          trace_id = f' - {trace_id} - ' if trace_id else ' - '
          return f'{time1}{method}{trace_id}{other}'
@@ -122,7 +146,7 @@ def clean_up_msg_3(msg: str = None, trace_id_fixed_length: int = None) -> str:
          return msg
 
 
- def clean_up_msg_4(msg: str = None, trace_id_fixed_length: int = None) -> str:
+ def clean_up_msg_4(msg: str = None, trace_id_fixed_length: int = None, method_length: int = 15) -> str:
      try:
          """
          2025-10-11T10:49:24.071000+07:00 INFO [TID: N/A] [8] [strategyAsyncExecutor-1] [FlowExecutor] [-] [33f7a5cfed3548f9aa3cc39079d1e407]:(catTraceId:rcs-provider-server-0a1e0d61-488931-966531) - requestId has generated
@@ -138,7 +162,7 @@ def clean_up_msg_4(msg: str = None, trace_id_fixed_length: int = None) -> str:
          time1, _, msg0 = msg.strip().partition(' ')
          msg1 = msg0.split(SEP_S)
          thread_id = get_thread_id_for_log(msg1[2].strip().rpartition('-')[2])
-         method = msg1[3].strip().strip()[-15:].strip().replace('[', '').rjust(15)
+         method = msg1[3].strip().strip()[-method_length:].strip().replace('[', '').rjust(method_length)
          if ']:(catTraceId:' in msg1[5]:
              trace_id = re.search(r'\(catTraceId:([^)]+)\)', msg0).group(1)
              other = msg0.partition(trace_id)[2][3:].strip()
@@ -152,7 +176,7 @@ def clean_up_msg_4(msg: str = None, trace_id_fixed_length: int = None) -> str:
          return msg
 
 
- def clean_up_msg_5(msg: str = None, trace_id_fixed_length: int = None) -> str:
+ def clean_up_msg_5(msg: str = None, trace_id_fixed_length: int = None, method_length: int = 31) -> str:
      try:
          """
          2025-10-10T09:59:36.118111+07:00 [http-nio-8080-exec-13][AUDIT.1070150904958674814][20251010095936118AUDIT04869][jcl_20250109000001][][MAIN] INFO - (catTraceId:xdecisionengine-0a1e0845-488906-400194) - putAll to context ,value={"app":"kredi","ip":"192.168.1.8","session_id":"","source_type":"ANDROID","product_name":"kredi"} - cn.xinfei.xdecision.engine.domain.context.PipelineContextHolder.()
@@ -173,7 +197,7 @@ def clean_up_msg_5(msg: str = None, trace_id_fixed_length: int = None) -> str:
              msg0, _, method = msg0.rpartition(' - ')
          else:
              method = msg0.rpartition(') - [')[2].strip().rpartition('] ')[0].strip()
-         method = method.strip().replace('.()', '')[-31:].rjust(31)
+         method = method.strip().replace('.()', '')[-method_length:].rjust(method_length)
          trace_id = ''
          other = ''
          if '(catTraceId:' in msg0:
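
Taken together, the new simplify_msg helper and the method_length parameter threaded from clean_up_msg into every clean_up_msg_* formatter make the condensed log format configurable. A rough usage sketch, assuming the module imports cleanly as released; the log line below is invented and only needs the "timestamp ... - traceId:... - message" shape that simplify_msg(only_date_msg=True) splits on:

from upplib.clean_up_msg import clean_up_msg, simplify_msg

line = "2025-10-09T14:25:28+07:00 INFO - traceId:db57046b - OrderService.create ok"

# Keep only the leading timestamp plus the part after the second ' - '.
print(simplify_msg(line, only_date_msg=True))
# 2025-10-09T14:25:28+07:00 OrderService.create ok

# Drop the first 5 characters ("INFO ") of the message part.
print(simplify_msg(line, msg_delete_prefix=5))

# method_length now sets the right-justified width of the method column
# (previously hard-coded to 31, or 15 in clean_up_msg_4).
print(clean_up_msg(line, clean_up_type=3, method_length=40))
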
upplib/db.py CHANGED
@@ -173,10 +173,6 @@ def extract_sql(log_content: str = '') -> tuple[str | None, str | None]:
      return sql, total_sql
 
 
- def format_sql(sql: str) -> str:
-     return sqlparse.format(sql, reindent=True, keyword_case="upper")
-
-
  def deal_sql(sql: str) -> str:
      sql = sql.replace('\n', ' ')
      sql = re.sub(r'\s+', ' ', sql).strip()
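
The deleted format_sql helper was only a thin wrapper around sqlparse, so callers that still want pretty-printed SQL after upgrading can call sqlparse directly with the same arguments the removed function used. A minimal sketch, assuming sqlparse is available (upplib.db already relied on it):

import sqlparse

# Same behaviour as the format_sql helper removed in 3.2.2:
# re-indent the statement and upper-case SQL keywords.
def format_sql(sql: str) -> str:
    return sqlparse.format(sql, reindent=True, keyword_case="upper")

print(format_sql("select id, name from users where status = 'active' order by id"))
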
upplib/file.py CHANGED
@@ -3,36 +3,77 @@ from upplib.index import *
 
 
  def get_file(file_path: str = None,
-              path_prefix: str = None,
-              prefix: str = None,
+              path_startswith: str = None,
+              startswith: str = None,
               path_contain: str = None,
               contain: str = None,
-              path_suffix: str = None,
-              suffix: str = None) -> list[str]:
+              sort_asc: bool | None = True,
+              path_endswith: str = None,
+              endswith: str = None) -> list[str]:
      """
      File-related operations.
      Look up all files under the given folder; it can also be a single file.
-     file_path : file path
-     path_prefix : file path starts with path_prefix
-     prefix : file name starts with prefix
-     path_contain : file path contains this string
-     contain : file name contains this string
-     path_suffix : file path ends with path_suffix
-     suffix : file name ends with suffix
+     file_path : file path
+     path_startswith : file path starts with path_startswith
+     startswith : file name starts with startswith
+     path_contain : file path contains this string
+     contain : file name contains this string
+     sort_asc : whether to sort ascending, i.e. smallest first
+     path_endswith : file path ends with path_endswith
+     endswith : file name ends with endswith
      return list
      """
      if file_path is None:
          file_path = os.path.dirname(os.path.abspath('.'))
      list_data = []
-     get_file_all(file_path, list_data, path_prefix, prefix, path_contain, contain, path_suffix, suffix)
+     get_file_all(file_path, list_data, path_startswith, startswith, path_contain, contain, path_endswith, endswith)
      # De-duplicate the results
-     return list(set(list_data))
+     r_list = list(set(list_data))
+     if sort_asc is not None:
+         if sort_asc:
+             r_list.sort(reverse=False)
+         else:
+             r_list.sort(reverse=True)
+     return r_list
+
+
+ def get_file_folder(file_name_one: str = None) -> str:
+     """
+     Return the folder path of the given file.
+     param file_name_one : full path of the file
+     return str : folder path of the file
+     """
+     file_sep = '\\' if is_win() else '/'
+     file_name_list = file_name_one.split(file_sep)
+     all_file_path = file_sep.join(file_name_list[0:-1])
+     return all_file_path
+
+
+ def remove_folder_file(file_path: str = None,
+                        path_startswith: str = None,
+                        startswith: str = None,
+                        path_contain: str = None,
+                        contain: str = None,
+                        path_endswith: str = None,
+                        endswith: str = None):
+     """
+     Delete the matching files under the given folder.
+     """
+     file_part_all_list = get_file(file_path=file_path,
+                                   path_startswith=path_startswith,
+                                   startswith=startswith,
+                                   path_contain=path_contain,
+                                   contain=contain,
+                                   path_endswith=path_endswith,
+                                   endswith=endswith)
+     for file_part_all_one in file_part_all_list:
+         os.remove(file_part_all_one)
 
 
  def get_folder(file_path: str = None,
-                prefix: str = None,
+                startswith: str = None,
                 contain: str = None,
-                suffix: str = None) -> list[str]:
+                endswith: str = None) -> list[str]:
      """
      File-related operations; folders only.
      Look up everything under the given folder; it can also be a single file.
@@ -42,17 +83,17 @@ def get_folder(file_path: str = None,
      if file_path is None:
          file_path = os.path.dirname(os.path.abspath('.'))
      list_data = []
-     get_folder_all(file_path, list_data, prefix, contain, suffix)
+     get_folder_all(file_path, list_data, startswith, contain, endswith)
      # De-duplicate the results
      return list(set(list_data))
 
 
  # Whether the folder contains a matching file
  def contain_file(file_path: str = None,
-                  prefix: str = None,
+                  startswith: str = None,
                   contain: str = None,
-                  suffix: str = None) -> bool:
-     return len(get_file(file_path, prefix, contain, suffix)) > 0
+                  endswith: str = None) -> bool:
+     return len(get_file(file_path, startswith, contain, endswith)) > 0
 
 
  def get_file_data_line(file_path: str = None,
@@ -84,78 +125,77 @@ def get_file_data_line(file_path: str = None,
  # Look up all files under the given folder; it can also be a single file
  def get_file_all(file_path: str = None,
                   list_data: list = None,
-                  path_prefix: str = None,
-                  prefix: str = None,
+                  path_startswith: str = None,
+                  startswith: str = None,
                   path_contain: str = None,
                   contain: str = None,
-                  path_suffix: str = None,
-                  suffix: str = None) -> None:
+                  path_endswith: str = None,
+                  endswith: str = None) -> None:
      if os.path.isdir(file_path):
          for root, dir_names, file_names in os.walk(file_path):
              for file_name in file_names:
-                 if (get_file_check(os.path.join(root, file_name), path_prefix, path_contain, path_suffix)
-                         and get_file_check(file_name, prefix, contain, suffix)):
+                 if (get_file_check(os.path.join(root, file_name), path_startswith, path_contain, path_endswith)
+                         and get_file_check(file_name, startswith, contain, endswith)):
                      list_data.append(os.path.join(root, file_name))
              for dir_name in dir_names:
-                 get_file_all(os.path.join(root, dir_name), list_data, path_prefix, prefix, path_contain, contain, path_suffix, suffix)
-     elif (get_file_check(file_path, prefix, contain, suffix)
-             and get_file_check(file_path, path_prefix, path_contain, path_suffix)):
+                 get_file_all(os.path.join(root, dir_name), list_data, path_startswith, startswith, path_contain, contain, path_endswith, endswith)
+     elif (get_file_check(file_path, startswith, contain, endswith)
+             and get_file_check(file_path, path_startswith, path_contain, path_endswith)):
          list_data.append(file_path)
 
 
  # Look up all folders under the given folder; it can also be a single file
  def get_folder_all(file_path: str = None,
                     list_data: list = None,
-                    prefix: str = None,
+                    startswith: str = None,
                     contain: str = None,
-                    suffix: str = None) -> None:
+                    endswith: str = None) -> None:
      if os.path.isdir(file_path):
          for root, dir_names, file_names in os.walk(file_path):
              for dir_name in dir_names:
                  dir_name_path = os.path.join(root, dir_name)
-                 if get_file_check(dir_name_path, prefix, contain, suffix):
+                 if get_file_check(dir_name_path, startswith, contain, endswith):
                      list_data.append(dir_name_path)
                  else:
-                     get_folder_all(dir_name_path, list_data, prefix, contain, suffix)
+                     get_folder_all(dir_name_path, list_data, startswith, contain, endswith)
 
 
- def get_file_check(
-         name: str = None,
-         prefix: str = None,
-         contain: str = None,
-         suffix: str = None) -> bool:
+ def get_file_check(name: str = None,
+                    startswith: str = None,
+                    contain: str = None,
+                    endswith: str = None) -> bool:
      """
      Check whether the file matches the filters.
-     prefix : starts with this prefix
-     contain : contains this string
-     suffix : ends with this suffix
+     startswith : starts with this string
+     contain : contains this string
+     endswith : ends with this string
      """
      if name is None or name == '':
          return False
      p = True
      c = True
      s = True
-     if prefix is not None:
-         p = name.startswith(prefix)
+     if startswith is not None:
+         p = name.startswith(startswith)
      if contain is not None:
          c = name.find(contain) > -1
-     if suffix is not None:
-         s = name.endswith(suffix)
+     if endswith is not None:
+         s = name.endswith(endswith)
      return p and c and s
 
 
  def find_file_by_content(file_path: str = '',
                           contain_txt: str = None,
-                          prefix: str = None,
+                          startswith: str = None,
                           contain: str = None,
-                          suffix: str = None) -> bool | None:
+                          endswith: str = None) -> bool | None:
      """
      Check whether the file content contains the given string.
      Use with caution; execution may take a long time.
      """
-     list_file = get_file(file_path, prefix, contain, suffix)
+     list_file = get_file(file_path, startswith, contain, endswith)
      if len(list_file) == 0:
-         to_log(f'no_matched_file : {file_path} , {contain_txt} , {prefix} , {contain} , {suffix}')
+         to_log(f'no_matched_file : {file_path} , {contain_txt} , {startswith} , {contain} , {endswith}')
          return False
      if contain_txt is None:
          to_log(list_file)
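
Callers using the 2.9.9 keyword names (prefix, suffix, path_prefix, path_suffix) need to switch to the startswith/endswith spellings, and get_file now sorts its results ascending by default via the new sort_asc flag. A rough usage sketch of the renamed filters plus the new get_file_folder and remove_folder_file helpers; the directory and patterns are made up for illustration:

from upplib.file import get_file, get_file_folder, remove_folder_file

# 2.9.9: get_file('/tmp/logs', suffix='.log')
# 3.2.2: renamed keywords; results are sorted ascending unless sort_asc is
#        False (descending) or None (unsorted).
log_files = get_file(file_path='/tmp/logs', endswith='.log', sort_asc=False)

for path in log_files:
    # New helper: folder portion of a full file path.
    print(get_file_folder(path), path)

# New helper: delete every matching file under the folder (use with care).
remove_folder_file(file_path='/tmp/logs', startswith='old_', endswith='.log')
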