pilot.linkstec 0.0.32__py3-none-any.whl → 0.0.90__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. pilot/base/__init__.py +0 -0
  2. pilot/base/ai_call.py +38 -0
  3. pilot/base/ai_info.py +20 -0
  4. pilot/base/chage_file_tag_base.py +73 -0
  5. pilot/base/db_operation_base.py +536 -0
  6. pilot/base/delete_commnents_base.py +306 -0
  7. pilot/base/file_operation.py +44 -0
  8. pilot/base/get_file_encoding.py +14 -0
  9. pilot/base/make_parsing_java_file_order_base.py +154 -0
  10. pilot/base/split_file_base.py +256 -0
  11. pilot/create_python/__init__.py +0 -0
  12. pilot/create_python/config/__init__.py +0 -0
  13. pilot/create_python/create_python.py +150 -0
  14. pilot/create_python/sample/__init__.py +0 -0
  15. pilot/create_python/sample/child_sample/__init__.py +0 -0
  16. pilot/create_python/sample/child_sample/job/__init__.py +0 -0
  17. pilot/create_python/sample/config/__init__.py +0 -0
  18. pilot/db/__init__.py +0 -0
  19. pilot/db/create_table.py +34 -0
  20. pilot/db/db_connect.py +49 -0
  21. pilot/db/db_main.py +293 -0
  22. pilot/db/db_util.py +508 -0
  23. pilot/db/ddl/__init__.py +18 -0
  24. pilot/db/dml/__init__.py +18 -0
  25. pilot/db/sql_executor.py +62 -0
  26. pilot/db/sql_loader.py +233 -0
  27. pilot/db/sql_service.py +55 -0
  28. pilot/file_tool/__init__.py +0 -0
  29. pilot/file_tool/create_prompt_file.py +75 -0
  30. pilot/file_tool/json_file_tool.py +103 -0
  31. pilot/prompt/__init__.py +0 -0
  32. {pilot_linkstec-0.0.32.dist-info → pilot_linkstec-0.0.90.dist-info}/METADATA +1 -1
  33. pilot_linkstec-0.0.90.dist-info/RECORD +66 -0
  34. pilot_linkstec-0.0.32.dist-info/RECORD +0 -35
  35. {pilot_linkstec-0.0.32.dist-info → pilot_linkstec-0.0.90.dist-info}/WHEEL +0 -0
  36. {pilot_linkstec-0.0.32.dist-info → pilot_linkstec-0.0.90.dist-info}/licenses/LICENSE +0 -0
  37. {pilot_linkstec-0.0.32.dist-info → pilot_linkstec-0.0.90.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,306 @@
+ import copy
+ from pathlib import Path
+ from typing import List, Dict
+
+ from lxml import etree as ET
+
+ from pilot.job.impl.base_job import BaseJob
+
+ from base.file_operation import read_file_lines
+ from db.sql_service import delete_sql_info, insert_sql_info
+
+
+ class DeleteComment(BaseJob):
+
+     @staticmethod
+     def _remove_java_comments(java_code: str) -> str:
+         """
+         Strip every comment out of Java source code with a small state machine.
+
+         Args:
+             java_code: the complete Java source, as a string.
+
+         Returns:
+             The source with all comments removed.
+         """
+         NORMAL = 0          # ordinary code
+         LINE_COMMENT = 1    # // comment
+         BLOCK_COMMENT = 2   # /* ... */ or /** ... */
+         STRING_LITERAL = 3  # "..." (escape sequences included)
+
+         state = NORMAL
+         i = 0
+         n = len(java_code)
+         out: List[str] = []
+
+         while i < n:
+             ch = java_code[i]
+
+             # ---------- ordinary code ----------
+             if state == NORMAL:
+                 if ch == '/' and i + 1 < n:   # possible start of a comment
+                     nxt = java_code[i + 1]
+                     if nxt == '/':            # // line comment
+                         state = LINE_COMMENT
+                         i += 2
+                         continue
+                     elif nxt == '*':          # /* block comment (including /**)
+                         state = BLOCK_COMMENT
+                         i += 2
+                         continue
+                     else:
+                         out.append(ch)
+                 elif ch == '"':               # start of a string literal
+                     state = STRING_LITERAL
+                     out.append(ch)
+                 else:
+                     out.append(ch)
+
+             # ---------- line comment ----------
+             elif state == LINE_COMMENT:
+                 if ch == '\n':      # end of line: back to the normal state
+                     out.append(ch)  # keep the newline so line numbers stay stable
+                     state = NORMAL
+                 # every other character is comment content and is dropped
+
+             # ---------- block comment ----------
+             elif state == BLOCK_COMMENT:
+                 if ch == '*' and i + 1 < n and java_code[i + 1] == '/':
+                     state = NORMAL
+                     i += 2          # skip the closing */
+                     continue
+                 # drop the comment body, but keep newlines so the line
+                 # numbers of the following code are unaffected
+                 if ch == '\n':
+                     out.append('\n')
+
+             # ---------- string literal ----------
+             elif state == STRING_LITERAL:
+                 out.append(ch)
+                 if ch == '\\' and i + 1 < n:  # escape: copy the next character verbatim
+                     out.append(java_code[i + 1])
+                     i += 2
+                     continue
+                 if ch == '"':                 # unescaped quote ends the literal
+                     state = NORMAL
+
+             i += 1
+
+         return ''.join(out)
+
+     def _remove_sql_comments(self, xml_path: Path) -> bytes:
+         """
+         Read a MyBatis XML file and do three things:
+         1. Expand every <include> (recursively).
+         2. Remove the <sql id="..."> fragments that were expanded (optional).
+         3. Remove all XML comments, so no <!-- ... --> appears in the output.
+
+         Returns:
+             The expanded document serialized as UTF-8 bytes.
+         """
+         parser = ET.XMLParser(remove_blank_text=False)  # keep the original newlines and indentation
+         tree = ET.parse(str(xml_path), parser=parser)
+         root = tree.getroot()
+
+         # 1. Collect the <sql> fragments.
+         fragments = self.build_sql_fragments(root)
+
+         # 2. Expand every <include>.
+         self._expand_node(root, fragments)
+
+         # 3. Remove the expanded <sql> elements (comment this line out to keep them).
+         self._remove_unused_sql(root)
+
+         # 4. Remove all XML comments.
+         self._remove_all_comments(root)
+
+         # 5. Serialize; pretty_print keeps the output readable.
+         expanded_xml = ET.tostring(
+             root,
+             encoding="utf-8",
+             xml_declaration=True,  # keep an <?xml ...?> header like the original file
+             pretty_print=True      # re-indent for reading or printing
+         )
+
+         return expanded_xml
+
+     # -------------------------------------------------
+     # Collect <sql id="..."> (and <select id="...">) fragments
+     # -------------------------------------------------
+     @staticmethod
+     def build_sql_fragments(root: ET.Element) -> Dict[str, ET.Element]:
+         """Return {id: deepcopy(element)} so later edits cannot touch the originals."""
+         fragments = {}
+         for sql_el in root.findall(".//sql"):
+             sid = sql_el.get("id")
+             if not sid:
+                 continue
+             fragments[sid] = copy.deepcopy(sql_el)
+
+         for sql_el in root.findall(".//select"):
+             sid = sql_el.get("id")
+             if not sid:
+                 continue
+             fragments[sid] = copy.deepcopy(sql_el)
+
+         return fragments
+
+     @staticmethod
+     def _strip_text(text: str) -> str:
+         """Trim blank lines at the edges while keeping internal newlines and indentation."""
+         if text is None:
+             return ""
+         lines = text.splitlines()
+         # drop blank lines; strip trailing spaces but keep the left indentation
+         lines = [ln.rstrip() for ln in lines if ln.strip() != ""]
+         return "\n".join(lines)
+
+     # -------------------------------------------------
+     # Expand <include> elements (recursively)
+     # -------------------------------------------------
+     def _expand_node(self, node: ET.Element, fragments: Dict[str, ET.Element]) -> None:
+         """
+         Walk the subtree under node and replace every <include> with its fragment.
+         """
+         for child in list(node):  # copy the child list so the tree can be edited mid-loop
+             if child.tag == "include":
+                 refid = child.get("refid")
+                 if not refid:
+                     raise ValueError("<include> without refid attribute")
+
+                 fragment = fragments.get(refid)
+                 if fragment is None:
+                     raise KeyError(f"SQL fragment id='{refid}' not found")
+
+                 # Copy, then expand recursively (the fragment may itself contain <include>).
+                 frag_copy = copy.deepcopy(fragment)
+                 self._expand_node(frag_copy, fragments)
+
+                 # ---- replacement ----
+                 parent = child.getparent()
+                 idx = parent.index(child)
+
+                 # The tail of the <include> (if any) is re-attached after the fragment.
+                 after = child.tail or ""
+
+                 if len(frag_copy) == 0:  # text-only fragment
+                     txt = self._strip_text(frag_copy.text or "")
+                     if idx == 0:
+                         parent.text = (parent.text or "") + txt + after
+                     else:
+                         prev = parent[idx - 1]
+                         prev.tail = (prev.tail or "") + txt + after
+                     parent.remove(child)
+                 else:
+                     # Snapshot the children first: lxml *moves* a node on insert,
+                     # so frag_copy would already be empty when counted below.
+                     subs = list(frag_copy)
+
+                     # Insert the fragment's children one by one at the original position.
+                     for sub in subs:
+                         parent.insert(idx, sub)
+                         idx += 1
+
+                     # Attach fragment.text (if any) to the first inserted node's .text.
+                     if frag_copy.text:
+                         first = parent[idx - len(subs)]
+                         first.text = (first.text or "") + frag_copy.text
+
+                     # Attach after (the original <include> tail) to the last inserted node.
+                     if after:
+                         last = parent[idx - 1]
+                         last.tail = (last.tail or "") + after
+
+                     # Remove the original <include>.
+                     parent.remove(child)
+
+             else:
+                 self._expand_node(child, fragments)
+
+     # -------------------------------------------------
+     # Remove the already-expanded <sql> fragments (optional)
+     # -------------------------------------------------
+     @staticmethod
+     def _remove_unused_sql(root: ET.Element) -> None:
+         """After expansion, delete every <sql id="..."> element from the document."""
+         for sql_el in root.findall(".//sql"):
+             parent = sql_el.getparent()
+             if parent is not None:
+                 parent.remove(sql_el)
+
+     # -------------------------------------------------
+     # Remove all XML comments (<!-- ... -->)
+     # -------------------------------------------------
+     def _remove_comment(self, comment: ET._Comment) -> None:
+         """Safely delete a comment node while keeping its tail text."""
+         parent = comment.getparent()
+         if parent is None:
+             return
+         self._splice_tail_before(comment, comment.tail)  # move the tail out of the way first
+         parent.remove(comment)                           # then delete the node
+
+     def _remove_all_comments(self, root: ET._Element) -> None:
+         """
+         Walk the whole tree and delete every Comment node, keeping each comment's
+         tail (i.e. the SQL text that follows it).
+         """
+         for child in list(root):          # list() copies, so the tree can change during the walk
+             if child.tag is ET.Comment:   # comparing the tag is the reliable comment check in lxml
+                 self._remove_comment(child)
+             else:
+                 self._remove_all_comments(child)
+
+     def _splice_tail_before(self, node: ET._Element, tail: str) -> None:
+         """Merge node.tail into the previous sibling's tail (or the parent's text)."""
+         if not tail:
+             return
+
+         prev = node.getprevious()
+         if prev is not None:   # there is a previous sibling element
+             prev.tail = (prev.tail or "") + tail
+         else:                  # node is its parent's first child
+             parent = node.getparent()
+             if parent is not None:
+                 parent.text = (parent.text or "") + tail
+
+     @staticmethod
+     def _insert_sql_file_info(file_name, file_path, file_out_path_str):
+         # Replace any previous record for this mapper file, then insert the new one.
+         params_delete_file_info = {
+             "mapper_file_name": file_name.split('.')[0]
+         }
+         delete_sql_info('delete_sql_file_info', params_delete_file_info)
+
+         params_file_info = {
+             "mapper_file_name": file_name.split('.')[0],
+             "original_file_path": file_path,
+             "no_comment_file_path": file_out_path_str
+         }
+         insert_sql_info('sql_file_info', params_file_info)
+
+     def run(self):
+         try:
+             lines = read_file_lines(self.file_path)
+             str_code = ''.join(lines)
+
+             remove_file_type = getattr(self, 'file_type')
+             output_file_path = getattr(self, 'target_file_path')
+
+             match remove_file_type:
+                 case 'Java':
+                     del_comment_code = self._remove_java_comments(str_code)
+
+                     with open(output_file_path, "w", encoding="utf-8") as fp:
+                         fp.write(del_comment_code)
+                 case 'mybatis':
+                     del_comment_code = self._remove_sql_comments(Path(self.file_path))
+                     # target_file_path is expected to be a pathlib.Path here
+                     output_file_path.write_bytes(del_comment_code)
+                     file_name = Path(self.file_path).name.split('.')[0]
+                     self._insert_sql_file_info(file_name, self.file_path, str(output_file_path))
+                 case _:
+                     del_comment_code = ''
+
+         except Exception as e:
+             self.logger.error(f"{__name__} terminated abnormally. {e}")
+             return
+
+         super().run()
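
A quick way to sanity-check the new comment stripper is to drive the state machine directly. A minimal sketch, assuming the wheel is installed and its `base`/`db` modules resolve on sys.path as the imports above require; `_remove_java_comments` is a staticmethod, so no job wiring is needed:

    from pilot.base.delete_commnents_base import DeleteComment

    java_src = (
        'public class Hello {\n'
        '    // a line comment\n'
        '    /* a block comment */\n'
        '    String s = "not // a comment"; /** javadoc */\n'
        '}\n'
    )

    # Comments disappear, the string literal containing "//" survives,
    # and newlines are preserved so line numbers stay stable.
    print(DeleteComment._remove_java_comments(java_src))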
@@ -0,0 +1,44 @@
+ import json
+ import logging
+ import os.path
+ from pathlib import Path
+
+ from typing import List, Dict, Any
+
+ from base.get_file_encoding import get_file_encoding
+
+ logger = logging.getLogger(__name__)
+
+ def read_file_lines(file_path):
+     """Read a text file with its detected encoding and return the list of lines."""
+     if not os.path.exists(file_path):
+         logger.error(f"File does not exist. {file_path}")
+         return None
+     file_encoding = get_file_encoding(file_path)
+     with open(file_path, 'r', encoding=file_encoding) as f:
+         _lines = f.readlines()
+     return _lines
+
+ def read_json_file_lines(json_file_path):
+     """Read a JSON file with its detected encoding and return the parsed data."""
+     if not os.path.exists(json_file_path):
+         logger.error(f"JSON file does not exist. {json_file_path}")
+         return None
+
+     json_file_encoding = get_file_encoding(json_file_path)
+     with open(json_file_path, 'r', encoding=json_file_encoding) as fp:
+         return json.load(fp)  # -> List[Dict]
+
+ def write_file_line(file_path, file_content):
+     path = Path(file_path)
+     os.makedirs(str(path.parent), exist_ok=True)
+     with path.open('w', encoding="utf-8") as f:
+         f.write(file_content)
+
+ def write_json_file(data: List[Dict[str, Any]], json_file_path) -> None:
+     """Write the list back to a JSON file, 4-space indented for readability."""
+     path = Path(json_file_path)
+     with path.open("w", encoding="utf-8") as f:
+         json.dump(data, f, ensure_ascii=False, indent=4)
+
+ def write_java_files(file_path, code):
+     real_code = code.replace(r'\n', '\n')  # turn literal "\n" sequences into real newlines
+     with Path(file_path).open("w", encoding="utf-8") as f:
+         f.write(real_code.rstrip() + '\n')
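
The helpers pair up into simple round trips. A minimal sketch of the JSON pair, assuming `base.file_operation` resolves on sys.path; note that `write_json_file` does not create parent directories (only `write_file_line` does), so the path here is a plain file name:

    from base.file_operation import write_json_file, read_json_file_lines

    records = [{"class": "UserDao", "method": "findById"}]
    write_json_file(records, "records.json")     # 4-space indent, ensure_ascii=False

    data = read_json_file_lines("records.json")  # encoding detected via chardet
    print(data[0]["method"])                     # -> findById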
@@ -0,0 +1,14 @@
+ import chardet
+
+ def get_file_encoding(filename):
+     # Detect the file's encoding from its raw bytes.
+     with open(filename, 'rb') as f1:
+         data = f1.read()
+     result = chardet.detect(data)
+     encoding = result['encoding']
+
+     # Anything chardet does not recognise as UTF-8 (including a failed
+     # detection) is treated as Shift-JIS.
+     if not encoding or encoding.lower() != 'utf-8':
+         encoding = 'sjis'
+     return encoding
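
Because every result other than UTF-8 (including a detection failure, and ASCII-only files, which chardet reports as 'ascii') collapses to 'sjis', the helper is effectively a UTF-8-versus-Shift-JIS switch. A minimal sketch with an illustrative path:

    from base.get_file_encoding import get_file_encoding

    enc = get_file_encoding("src/Sample.java")  # 'utf-8' or 'sjis'
    with open("src/Sample.java", "r", encoding=enc) as f:
        source = f.read()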
@@ -0,0 +1,154 @@
+ import fnmatch
+ import os
+ from pathlib import Path
+ from typing import Dict, Any
+
+ from pilot.job.impl.base_job import BaseJob
+
+ from base.file_operation import write_json_file
+ from db.sql_service import select_sql_info
+
+ class MakeParsingJavaOrderBase(BaseJob):
+
+     def get_no_sub_method_java_list(self):
+         """Collect methods (outside .dao packages) that call no sub-methods."""
+         search_result = select_sql_info('sql_no_sub_method_info', {})
+         parsing_no_sub_method_list = []
+         if search_result:
+             for result_info in search_result:
+                 str_class_name = result_info.get('CLASS_NAME')
+                 str_method_name = result_info.get('METHOD_NAME')
+                 str_package_name = result_info.get('PACKAGE_NAME')
+
+                 if '.dao' not in str_package_name:
+                     params_get_count_sub_method_info = {
+                         "sub_method_name": str_method_name,
+                         "called_method_class": str_package_name + '.' + str_class_name
+                     }
+
+                     search_count_result = self.get_count_sub_method_info(params_get_count_sub_method_info)
+                     if search_count_result > 0:
+                         no_sub_method_java_info = str_class_name + ',' + str_method_name + ',' + str_package_name
+                         parsing_no_sub_method_list.append(no_sub_method_java_info)
+
+         return parsing_no_sub_method_list
+
+     def get_dao_method_list(self):
+         """Collect DAO-layer methods (packages matching %.dao%)."""
+         search_result = select_sql_info('sql_get_dao_method_info', {'package_name': '%.dao%'})
+         parsing_dao_list = []
+         if search_result:
+             for result_info in search_result:
+
+                 _str_class_name = result_info.get('CLASS_NAME')
+                 str_method_name = result_info.get('METHOD_NAME')
+                 str_package_name = result_info.get('PACKAGE_NAME')
+
+                 # The stored class name has the method name appended; strip it back off.
+                 str_class_name = _str_class_name.replace('_' + str_method_name, '')
+
+                 params_get_count_sub_method_info = {
+                     "sub_method_name": str_method_name,
+                     "called_method_class": str_package_name + '.' + str_class_name
+                 }
+
+                 search_count_result = self.get_count_sub_method_info(params_get_count_sub_method_info)
+
+                 if search_count_result > 0:
+                     dao_info = str_class_name + ',' + str_method_name + ',' + str_package_name
+                     parsing_dao_list.append(dao_info)
+
+         return parsing_dao_list
+
+     def other_parsing_method(self, json_list, round_num):
+         """Resolve the remaining methods round by round; each round's results are
+         written to <file>_<round>.json and become the next round's input."""
+         while json_list:
+             values_str_list = []
+             for json_info in json_list:
+                 json_info_arr = json_info.split(',')
+                 class_name = json_info_arr[0]
+                 method_name = json_info_arr[1]
+                 package_name = json_info_arr[2]
+
+                 values_str_list.append((class_name, method_name))
+
+             # Rendered as "('Cls', 'method'), ..." for the VALUES clause of the query.
+             values_str = ",\n".join(
+                 f"('{cls}', '{method}')" for cls, method in values_str_list
+             )
+             search_result = select_sql_info('sql_get_sub_method_info_all_list', {'values_str': values_str})
+
+             if not search_result:
+                 break
+
+             parsing_java = []
+
+             for result_info in search_result:
+                 _str_class_name = result_info.get('CLASS_NAME')
+                 str_method_name = result_info.get('METHOD_NAME')
+                 str_package_name = result_info.get('PACKAGE_NAME')
+
+                 str_class_name = _str_class_name.replace('_' + str_method_name, '')
+
+                 params_get_count_sub_method_info = {
+                     "sub_method_name": str_method_name,
+                     "called_method_class": str_package_name + '.' + str_class_name
+                 }
+
+                 search_count_result = self.get_count_sub_method_info(params_get_count_sub_method_info)
+                 if search_count_result > 0:
+                     parsing_java_info_1 = str_class_name + ',' + str_method_name + ',' + str_package_name
+                     parsing_java.append(parsing_java_info_1)
+
+             json_file_name = os.path.basename(self.file_path).split('.')[0] + '_' + str(round_num) + '.json'
+             json_file_path = str(os.path.join(os.path.dirname(self.file_path), json_file_name))
+
+             write_json_file(parsing_java, json_file_path)
+
+             round_num += 1
+             json_list = parsing_java
+
+     @staticmethod
+     def get_count_sub_method_info(params: Dict[str, Any]) -> int:
+         """Return the call count for the given method, or 0 if nothing was found."""
+         search_count_result = select_sql_info('get_count_sub_method_info', params)
+         if search_count_result:
+             for count_info in search_count_result:
+                 count = count_info.get('COUNT')
+                 return count
+         return 0  # callers compare against 0, so never return None
+
+     def run(self):
+         try:
+             file_name = os.path.splitext(os.path.basename(self.file_path))[0]
+
+             no_sub_method_java_list = self.get_no_sub_method_java_list()
+             dao_list = self.get_dao_method_list()
+             search_list = no_sub_method_java_list + dao_list
+
+             count_index = 0
+
+             if no_sub_method_java_list:
+                 count_index = count_index + 1
+                 out_file_name = file_name + '_' + str(count_index) + '.json'
+                 no_sub_method_json_path = os.path.join(os.path.dirname(self.file_path), out_file_name)
+                 write_json_file(no_sub_method_java_list, no_sub_method_json_path)
+
+             if dao_list:
+                 count_index = count_index + 1
+                 out_file_name = file_name + '_' + str(count_index) + '.json'
+                 dao_json_path = os.path.join(os.path.dirname(self.file_path), out_file_name)
+                 write_json_file(dao_list, dao_json_path)
+
+             if search_list:
+                 self.other_parsing_method(search_list, count_index + 1)
+
+             # Collect every JSON file generated under the source file's directory.
+             matches = []
+             for root, _dirs, files in os.walk(Path(self.file_path).parent):
+                 for filename in fnmatch.filter(files, "*.json"):
+                     matches.append(os.path.join(root, filename))
+
+             setattr(self, '_list', matches)
+
+         except Exception as e:
+             self.logger.error(f"{__name__} terminated abnormally. {e}")
+             return
+
+         super().run()
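
Each round file written by `write_json_file` is a JSON array of 'CLASS,METHOD,PACKAGE' strings, so downstream consumers just split on commas. A minimal sketch with a hypothetical file name:

    import json
    from pathlib import Path

    # Hypothetical round file produced by MakeParsingJavaOrderBase.run()
    round_file = Path("orders/java_order_1.json")

    for entry in json.loads(round_file.read_text(encoding="utf-8")):
        class_name, method_name, package_name = entry.split(',')
        print(f"{package_name}.{class_name}#{method_name}")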