pilot.linkstec 0.0.32__py3-none-any.whl → 0.0.91__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. pilot/base/__init__.py +0 -0
  2. pilot/base/ai_call.py +38 -0
  3. pilot/base/ai_info.py +20 -0
  4. pilot/base/chage_file_tag_base.py +73 -0
  5. pilot/base/db_operation_base.py +536 -0
  6. pilot/base/delete_commnents_base.py +306 -0
  7. pilot/base/file_operation.py +44 -0
  8. pilot/base/get_file_encoding.py +14 -0
  9. pilot/base/make_parsing_java_file_order_base.py +154 -0
  10. pilot/base/split_file_base.py +256 -0
  11. pilot/client/__init__.py +0 -0
  12. pilot/client/ai_client.py +75 -0
  13. pilot/config/config_reader.py +81 -43
  14. pilot/create_python/__init__.py +0 -0
  15. pilot/create_python/config/__init__.py +0 -0
  16. pilot/create_python/create_python.py +150 -0
  17. pilot/create_python/sample/__init__.py +0 -0
  18. pilot/create_python/sample/child_sample/__init__.py +0 -0
  19. pilot/create_python/sample/child_sample/job/__init__.py +0 -0
  20. pilot/create_python/sample/config/__init__.py +0 -0
  21. pilot/db/__init__.py +0 -0
  22. pilot/db/create_table.py +34 -0
  23. pilot/db/db_connect.py +49 -0
  24. pilot/db/db_main.py +293 -0
  25. pilot/db/db_util.py +508 -0
  26. pilot/db/ddl/__init__.py +18 -0
  27. pilot/db/dml/__init__.py +18 -0
  28. pilot/db/sql_executor.py +62 -0
  29. pilot/db/sql_loader.py +233 -0
  30. pilot/db/sql_service.py +55 -0
  31. pilot/file_tool/__init__.py +0 -0
  32. pilot/file_tool/create_prompt_file.py +75 -0
  33. pilot/file_tool/json_file_tool.py +103 -0
  34. pilot/job/base/__init__.py +0 -0
  35. pilot/job/base/convert/__init__.py +0 -0
  36. pilot/job/base/convert/encodingTransformerJob.py +16 -0
  37. pilot/job/base/convert/tabReplaceJob.py +27 -0
  38. pilot/job/base/generate/__init__.py +0 -0
  39. pilot/job/base/generate/generateJsonBaseJob.py +42 -0
  40. pilot/job/base/generate/generateTextBaseJob.py +40 -0
  41. pilot/job/impl/base_job.py +4 -0
  42. pilot/prompt/__init__.py +0 -0
  43. pilot/unit/impl/base_unit.py +1 -0
  44. {pilot_linkstec-0.0.32.dist-info → pilot_linkstec-0.0.91.dist-info}/METADATA +1 -1
  45. pilot_linkstec-0.0.91.dist-info/RECORD +75 -0
  46. pilot_linkstec-0.0.32.dist-info/RECORD +0 -35
  47. {pilot_linkstec-0.0.32.dist-info → pilot_linkstec-0.0.91.dist-info}/WHEEL +0 -0
  48. {pilot_linkstec-0.0.32.dist-info → pilot_linkstec-0.0.91.dist-info}/licenses/LICENSE +0 -0
  49. {pilot_linkstec-0.0.32.dist-info → pilot_linkstec-0.0.91.dist-info}/top_level.txt +0 -0
pilot/db/sql_loader.py ADDED
@@ -0,0 +1,233 @@
+# --------------------------------------------------------------
+# sql_loader.py – generic SQL loading / parameter rendering utilities
+# --------------------------------------------------------------
+import pathlib
+import re
+from typing import Any, Mapping, Tuple, Union, Iterable, List
+import importlib.resources as pkg_resources  # Python 3.7+
+
+# ------------------------------------------------------------------
+# Default root directory: the directory containing this file
+# (kept for backward compatibility)
+# ------------------------------------------------------------------
+DEFAULT_SQL_ROOT = pathlib.Path(__file__).parent
+
+
+def _read_from_filesystem(root: pathlib.Path, relative_path: str) -> str:
+    """
+    Read SQL from the local filesystem.
+
+    Parameters
+    ----------
+    root : pathlib.Path
+        SQL root directory (e.g. Path('sql')).
+    relative_path : str
+        Path relative to the root, e.g. 'ddl/create_user_table.sql'.
+    """
+    file_path = root / relative_path
+    if not file_path.is_file():
+        raise FileNotFoundError(f"SQL file not found: {file_path}")
+    return file_path.read_text(encoding="utf-8")
+
+
+def _read_from_package(package: str, relative_path: str) -> str:
+    """
+    Read SQL from a Python package (resource).
+
+    Parameters
+    ----------
+    package : str
+        Package name, e.g. 'dml' or 'my_project.dml'.
+    relative_path : str
+        Path inside the package, e.g. 'ddl/create_user_table.sql'.
+    """
+    # importlib.resources.open_text takes care of encoding / BOM details
+    try:
+        with pkg_resources.open_text(package, relative_path,
+                                     encoding="utf-8-sig") as f:
+            return f.read()
+    except FileNotFoundError as exc:
+        raise FileNotFoundError(
+            f"SQL resource not found: package={package!r}, file={relative_path}"
+        ) from exc
+
+
+def _read_sql_file(relative_path: str,
+                   base_path: Union[pathlib.Path, str] = DEFAULT_SQL_ROOT
+                   ) -> str:
+    """
+    Unified entry point: the type of ``base_path`` decides how the SQL is read.
+    * Path → read from the filesystem
+    * str  → treat it as a package name and read the resource via importlib.resources
+    """
+    if isinstance(base_path, pathlib.Path):
+        return _read_from_filesystem(base_path, relative_path)
+    elif isinstance(base_path, str):
+        return _read_from_package(base_path, relative_path)
+    else:
+        raise TypeError(
+            "base_path must be a pathlib.Path (filesystem) or a str (package name), "
+            f"got {type(base_path)!r}"
+        )
+
+
+# ----------------------------------------------------------------------
+# 1️⃣ Regex: capture MyBatis-style #{name} / ${name}
+# ----------------------------------------------------------------------
+_RE_PLACEHOLDER = re.compile(r'''
+    (?:\#|\$)           # starts with # or $
+    \{                  # opening brace
+    \s*                 # optional whitespace
+    (?P<name>[^}]+?)    # parameter name (non-greedy)
+    \s*                 # optional whitespace
+    \}                  # closing brace
+''', re.VERBOSE)
+
+
+def _replace_mybatis_placeholders(sql: str,
+                                  use_named: bool) -> Tuple[str, List[str]]:
+    """
+    Replace ``#{name}`` / ``${name}`` with SQLite placeholders.
+
+    Parameters
+    ----------
+    sql : str
+        Raw SQL (possibly containing MyBatis placeholders).
+    use_named : bool
+        * ``True``  → use named placeholders ``:name``;
+        * ``False`` → use positional placeholders ``?``.
+
+    Returns
+    -------
+    Tuple[str, List[str]]
+        * The rewritten SQL.
+        * The parameter names in order of appearance (only meaningful
+          when ``use_named=False``).
+    """
+    if use_named:  # replace #{xxx} directly with :xxx
+        new_sql = _RE_PLACEHOLDER.sub(lambda m: f":{m.group('name').strip()}", sql)
+        return new_sql, []  # parameter order not needed
+
+    else:
+        # normalise every placeholder to "?"
+        names: List[str] = []
+
+        def repl(m: re.Match) -> str:
+            names.append(m.group('name').strip())
+            return "?"
+
+        new_sql = _RE_PLACEHOLDER.sub(repl, sql)
+        return new_sql, names
+
+
+# ----------------------------------------------------------------------
+# 2️⃣ Main function
+# ----------------------------------------------------------------------
+def render_sql(
+    template: str,
+    params: Union[Mapping[str, Any], Iterable[Any], None] = None,
+) -> Tuple[
+    str,
+    Union[Mapping[str, Any], Tuple[Any, ...]]
+]:
+
+    # -------------------------------------------------
+    # ① params is None → only normalise placeholders (all become ?)
+    # -------------------------------------------------
+    if params is None:
+        new_sql, _ = _replace_mybatis_placeholders(template, use_named=False)
+        return new_sql, ()
+
+    # -------------------------------------------------
+    # ② params is a Mapping → use named placeholders
+    # -------------------------------------------------
+    if isinstance(params, Mapping):
+        new_sql, _ = _replace_mybatis_placeholders(template, use_named=True)
+        return new_sql, params
+
+    # -------------------------------------------------
+    # ③ params is an Iterable (but not a Mapping) → positional placeholders
+    # -------------------------------------------------
+    if isinstance(params, Iterable):
+        # first turn every MyBatis placeholder in the template into ?
+        new_sql, name_order = _replace_mybatis_placeholders(template,
+                                                            use_named=False)
+
+        # 1) if the caller passed a list/tuple etc., simply convert it to a tuple
+        # 2) if the caller passed a dict by mistake (a dict is itself Iterable),
+        #    treat it as a Mapping so the values are not taken in the wrong order.
+        if isinstance(params, Mapping):
+            # pull the values out of the dict in name_order to keep the order consistent
+            ordered_vals = tuple(params[name] for name in name_order)
+        else:
+            ordered_vals = tuple(params)
+
+        # when the template contains no MyBatis placeholders, name_order is [] and
+        # ordered_vals is still the caller-supplied tuple/list → used as-is.
+        return new_sql, ordered_vals
+
+    # -------------------------------------------------
+    # ④ any other type (should not happen) → raise a friendly error
+    # -------------------------------------------------
+    raise TypeError(
+        "params must be a Mapping, an Iterable or None, "
+        f"but got {type(params)!r}"
+    )
+
+
+# ------------------------------------------------------------------
+# One-stop load + render
+# ------------------------------------------------------------------
+def load_sql(relative_path: str,
+             params: Union[Mapping[str, Any],
+                           Iterable[Any],
+                           None] = None,
+             base_path: Union[pathlib.Path, str] = DEFAULT_SQL_ROOT
+             ) -> Tuple[str,
+                        Union[Mapping[str, Any], Tuple[Any, ...]]]:
+    """
+    Read the SQL → (optionally) render the parameters → return ``(sql, parameters)``.
+
+    Parameters
+    ----------
+    relative_path : str
+        Path relative to the root directory / package, e.g. ``'ddl/create_user_table.sql'``.
+    params : Mapping / Iterable / None
+        Parameter object passed on to ``sqlite3``; rendering rules are the same as ``render_sql``.
+    base_path : pathlib.Path | str
+        *Path* → read from a directory on disk (default ``DEFAULT_SQL_ROOT``);
+        *str*  → treated as a **package name** and read via ``importlib.resources``.
+    """
+    raw_sql = _read_sql_file(relative_path, base_path=base_path)
+    # -------------------------------------------------
+    # ① if the given params contain `values_str` (pattern-2 usage)
+    # -------------------------------------------------
+    if isinstance(params, Mapping) and "values_str" in params:
+        # render the supplied list of pairs into a VALUES-list
+        values_sql = _render_values_str(params["values_str"])
+        # substitute the placeholder reserved in the template
+        # (by convention /*{values_str}*/)
+        raw_sql = raw_sql.replace("/*{values_str}*/", values_sql)
+
+        # drop `values_str` from params so it is not passed to sqlite3 as an
+        # ordinary parameter (other named parameters may stay in the same dict)
+        params = {k: v for k, v in params.items() if k != "values_str"}
+
+    return render_sql(raw_sql, params)
+
+
+# ----------------------------------------------------------------------
+# ② Render values_str into a VALUES-list
+# ----------------------------------------------------------------------
+def _render_values_str(
+    values: Iterable[Tuple[Any, Any]]
+) -> str:
+    """
+    Turn [(cls1, sub1), (cls2, sub2), …] into
+
+        ('cls1','sub1'),('cls2','sub2'),...
+
+    Used only in pattern-2. Single quotes are escaped by doubling them.
+    """
+    def esc(v: Any) -> str:
+        # only strings need escaping; other types are converted with str()
+        if isinstance(v, str):
+            return v.replace("'", "''")
+        return str(v)
+
+    rows = [f"('{esc(c)}', '{esc(s)}')" for c, s in values]
+    return ",\n".join(rows)
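Usage sketch (not part of the package): driving the loader above from plain sqlite3. The template path, table and column names are illustrative.

    import pathlib
    import sqlite3

    from pilot.db.sql_loader import load_sql, render_sql

    # A Mapping keeps the MyBatis names: #{user_id} becomes :user_id.
    sql, params = render_sql("SELECT * FROM user WHERE id = #{user_id}", {"user_id": 1})
    # sql == "SELECT * FROM user WHERE id = :user_id", params == {"user_id": 1}

    # An iterable switches to positional style: every placeholder becomes ?.
    sql, params = render_sql("SELECT * FROM user WHERE id = #{user_id}", [1])
    # sql == "SELECT * FROM user WHERE id = ?", params == (1,)

    # load_sql reads a template from disk first; this throwaway template is hypothetical.
    pathlib.Path("sql/ddl").mkdir(parents=True, exist_ok=True)
    pathlib.Path("sql/ddl/select_user.sql").write_text(
        "SELECT * FROM user WHERE id = #{user_id}", encoding="utf-8")

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE user (id INTEGER, name TEXT)")
    sql, params = load_sql("ddl/select_user.sql", {"user_id": 1},
                           base_path=pathlib.Path("sql"))
    rows = conn.execute(sql, params).fetchall()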
pilot/db/sql_service.py ADDED
@@ -0,0 +1,55 @@
+from typing import Dict, Any
+from pilot.db.db_util import exec_insert, exec_select, exec_update, exec_delete
+
+
+# run an INSERT statement
+def insert_sql_info(key: str, params: Dict[str, Any]):
+    exec_insert(key, params)
+
+# run a SELECT statement
+def select_sql_info(key: str, params: Dict[str, Any]):
+    result_rows = exec_select(key, params)
+
+    if not result_rows:
+        return []
+    return [row_to_dict(r) for r in result_rows]
+
+# run an UPDATE statement
+def update_sql_info(key: str, params: Dict[str, Any]):
+    exec_update(key, params)
+
+# run a DELETE statement
+def delete_sql_info(key: str, params: Dict[str, Any]):
+    exec_delete(key, params)
+
+def row_to_dict(row: Any) -> Dict[str, Any]:
+    """
+    Convert a database "row object" into a plain dict.
+    Supported:
+      - sqlite3.Row                 -> dict(row)
+      - SQLAlchemy Row              -> dict(row._mapping)  (SQLAlchemy >= 1.4)
+      - SQLAlchemy legacy RowProxy  -> row._asdict()
+      - MyBatis-Python ResultSet    -> dict(row)
+    For any other library, add the corresponding conversion here.
+    """
+    # already a plain dict → return as-is
+    if isinstance(row, dict):
+        return row
+
+    try:
+        # SQLAlchemy 1.4+ Row (has ._mapping)
+        return dict(row._mapping)  # type: ignore[attr-defined]
+    except AttributeError:
+        pass
+
+    try:
+        # SQLAlchemy 1.3 RowProxy (has ._asdict())
+        return row._asdict()  # type: ignore[attr-defined]
+    except AttributeError:
+        pass
+
+    try:
+        # anything dict() can consume, e.g. sqlite3.Row
+        return dict(row)
+    except Exception as exc:
+        raise TypeError(f"Cannot convert row of type {type(row)} to dict") from exc
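Usage sketch (not part of the package): row_to_dict applied to a sqlite3.Row, assuming pilot.db.sql_service and its db_util dependency import cleanly in your environment; the table is illustrative.

    import sqlite3

    from pilot.db.sql_service import row_to_dict

    conn = sqlite3.connect(":memory:")
    conn.row_factory = sqlite3.Row          # rows now expose keys() and [key] access
    conn.execute("CREATE TABLE user (id INTEGER, name TEXT)")
    conn.execute("INSERT INTO user VALUES (1, 'alice')")

    row = conn.execute("SELECT id, name FROM user").fetchone()
    print(row_to_dict(row))                 # {'id': 1, 'name': 'alice'}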
@@ -0,0 +1,75 @@
+import pathlib
+import re
+from typing import Mapping, Optional
+
+
+def load_text(file_path: str | pathlib.Path) -> str:
+    """Read the file content and return it as a Unicode string."""
+    path = pathlib.Path(file_path)
+    if not path.is_file():
+        raise FileNotFoundError(f"File not found: {path}")
+
+    return path.read_text(encoding='utf-8')
+
+def replace_by_map(
+    text: str,
+    replace_map: Mapping[str, str],
+    *,
+    use_regex: bool = False,
+    case_sensitive: bool = True,
+) -> str:
+
+    if not replace_map:
+        return text
+
+    # when using regex, compile every pattern up front for better performance
+    if use_regex:
+        flags = 0 if case_sensitive else re.IGNORECASE
+        compiled = [(re.compile(p, flags), repl) for p, repl in replace_map.items()]
+        for pattern, repl in compiled:
+            text = pattern.sub(repl, text)
+    else:
+        # plain str.replace is the fastest option
+        for old, new in replace_map.items():
+            text = text.replace(old, new)
+
+    return text
+
+def save_text(
+    file_path: str | pathlib.Path,
+    text: str,
+    *,
+    encoding: Optional[str] = None,
+) -> None:
+
+    path = pathlib.Path(file_path)
+    if not path.parent.exists():
+        path.parent.mkdir(parents=True, exist_ok=True)
+
+    enc = encoding or "utf-8"
+    path.write_text(text, encoding=enc)
+
+def process_file(
+    src_path: str | pathlib.Path,
+    dst_path: Optional[str | pathlib.Path] = None,
+    replace_map: Optional[Mapping[str, str]] = None,
+    *,
+    use_regex: bool = False,
+    case_sensitive: bool = True,
+) -> None:
+
+    original = load_text(src_path)
+
+    if replace_map:
+        new_text = replace_by_map(
+            original,
+            replace_map,
+            use_regex=use_regex,
+            case_sensitive=case_sensitive,
+        )
+    else:
+        new_text = original
+
+    target = dst_path if dst_path is not None else src_path
+    save_text(target, new_text)
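Usage sketch (not part of the package): filling a template file with the helpers above. The functions are the ones defined in this diff hunk (the import path depends on where the file lives in the wheel); the template file and the {{...}} marker convention are illustrative.

    from pathlib import Path

    # hypothetical template with literal markers
    Path("prompt_template.txt").write_text(
        "Generate DDL for {{TABLE_NAME}} with columns {{COLUMNS}}.",
        encoding="utf-8")

    replace_map = {
        "{{TABLE_NAME}}": "user",
        "{{COLUMNS}}": "id, name",
    }

    # literal (non-regex) replacement, written to a new file;
    # process_file is defined in the hunk above
    process_file("prompt_template.txt", "prompt_user.txt", replace_map)

    print(Path("prompt_user.txt").read_text(encoding="utf-8"))
    # Generate DDL for user with columns id, name.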
pilot/file_tool/json_file_tool.py ADDED
@@ -0,0 +1,103 @@
+import json
+from pathlib import Path
+from typing import List, Dict, Any, Sequence
+
+
+# -------------------------------------------------
+# Read the mapping JSON (returns list[dict])
+# -------------------------------------------------
+def load_mapping(json_path: Path) -> List[Dict[str, str]]:
+    """
+    Parameters
+    ----------
+    json_path : Path
+        JSON file containing records such as
+        {"table_name": "...", "table_name_jp": "..."}.
+
+    Returns
+    -------
+    List[Dict[str, str]]
+        The array object from the JSON file, used for lookups below.
+    """
+    if not json_path.is_file():
+        raise FileNotFoundError(f"Mapping file not found: {json_path}")
+    return json.loads(json_path.read_text(encoding="utf-8"))
+
+def get_json_items(
+    source_keys: List[str],
+    json_file_path: str,
+    *,
+    src_field: str = "key",
+    dst_fields: Sequence[str] = ("value",),
+    default: Any = None,
+    extra_fields: Sequence[str] = (),
+    result_key_prefix: str = "",
+) -> List[Dict[str, Any]]:
+    """
+    Generic mapping function (can extract several target fields in one pass).
+
+    Parameters
+    ----------
+    source_keys : List[str]
+        Keys to map (table names, codes, ...).
+    json_file_path : str
+        Path of the JSON file holding the mapping records; every record must
+        contain ``src_field`` and all fields listed in ``dst_fields``.
+    src_field : str, default "key"
+        Field name used to match ``source_keys``.
+    dst_fields : Sequence[str], default ("value",)
+        **One or more** target field names to extract at once.
+    default : Any, default None
+        Value used when a key from ``source_keys`` is not found in the mapping.
+    extra_fields : Sequence[str], default ()
+        Extra fields of the mapping record to copy verbatim into the result.
+    result_key_prefix : str, default ""
+        Prefix added to every returned field name (e.g. ``"tbl_"``) to avoid
+        key collisions.
+
+    Returns
+    -------
+    List[Dict[str, Any]]
+        Each record looks like:
+        {
+            "<prefix><src_field>" : "...",   # original key
+            "<prefix><dst1>"      : "...",
+            "<prefix><dst2>"      : "...",
+            ... extra_fields ...
+        }
+    """
+    # -------------------------------------------------
+    # 1️⃣ Turn the mapping array into a dict for O(1) lookups
+    # -------------------------------------------------
+
+    mapping_array = load_mapping(Path(json_file_path))
+
+    lookup: Dict[Any, Dict[str, Any]] = {}
+    for rec in mapping_array:
+        key = rec.get(src_field)
+        if key is not None:  # skip dirty records missing src_field
+            lookup[key] = rec
+
+    # -------------------------------------------------
+    # 2️⃣ Build the result list
+    # -------------------------------------------------
+    result: List[Dict[str, Any]] = []
+    for k in source_keys:
+        rec = lookup.get(k)  # may be None
+
+        # base field (the original key)
+        item: Dict[str, Any] = {
+            f"{result_key_prefix}{src_field}": k,
+        }
+
+        # target fields
+        for dst in dst_fields:
+            value = rec.get(dst, default) if rec else default
+            item[f"{result_key_prefix}{dst}"] = value
+
+        # extra fields (copied when a record exists, otherwise default)
+        for extra in extra_fields:
+            item[f"{result_key_prefix}{extra}"] = (
+                rec.get(extra, default) if rec else default
+            )
+        result.append(item)
+
+    return result
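Usage sketch (not part of the package): get_json_items against a throwaway mapping file. The field names mirror the table_name / table_name_jp example from load_mapping's docstring and are illustrative.

    import json
    from pathlib import Path

    from pilot.file_tool.json_file_tool import get_json_items

    mapping = [
        {"table_name": "user",  "table_name_jp": "ユーザ"},
        {"table_name": "order", "table_name_jp": "注文"},
    ]
    Path("table_map.json").write_text(
        json.dumps(mapping, ensure_ascii=False), encoding="utf-8")

    items = get_json_items(
        ["user", "unknown"],
        "table_map.json",
        src_field="table_name",
        dst_fields=("table_name_jp",),
        default="N/A",
        result_key_prefix="tbl_",
    )
    # [{'tbl_table_name': 'user',    'tbl_table_name_jp': 'ユーザ'},
    #  {'tbl_table_name': 'unknown', 'tbl_table_name_jp': 'N/A'}]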
pilot/job/base/convert/encodingTransformerJob.py ADDED
@@ -0,0 +1,16 @@
+import threading
+
+from pilot.job.impl.base_job import BaseJob
+
+from pilot.conver.converfileEncodding import nkf_convert
+
+
+class EncodingTransformerJob(BaseJob):
+    _begin_file_lock = threading.Lock()
+    def run(self):
+        with self._begin_file_lock:
+            if not self.change_current_trg_to_begin():
+                return
+        nkf_args = ['-w', '--overwrite']
+        nkf_convert(self.file_path, nkf_args)
+        super().run()
pilot/job/base/convert/tabReplaceJob.py ADDED
@@ -0,0 +1,27 @@
+import threading
+from pathlib import Path
+
+from pilot.job.impl.base_job import BaseJob
+
+class TabReplaceJob(BaseJob):
+    _begin_file_lock = threading.Lock()
+    def run(self):
+        with self._begin_file_lock:
+            if not self.change_current_trg_to_begin():
+                return
+        self.replace_tabs_with_spaces()
+        super().run()
+
+    def replace_tabs_with_spaces(self, tab_width: int = 4):
+        replaced_text = []
+        src_path = Path(self.file_path)
+        spaces = ' ' * tab_width
+        with open(self.file_path, 'r', encoding='utf-8', newline='') as rf:
+            for line in rf:
+                replaced_text.append(line.replace('\t', spaces))
+
+        tmp_path = src_path.parent / (src_path.name + '.tmp')
+        with open(tmp_path, 'w', encoding='utf-8', newline='') as wf:
+            wf.writelines(replaced_text)
+
+        tmp_path.replace(src_path)
pilot/job/base/generate/generateJsonBaseJob.py ADDED
@@ -0,0 +1,42 @@
+import json
+import os
+import threading
+import time
+
+from pilot.job.impl.base_job import BaseJob
+
+from pilot.generater.vertexai import VertexAISingleton
+
+class generateJsonBaseJob(BaseJob):
+
+    prompt_content: str
+    result_content: str
+    result_file_path: str
+
+    def run(self):
+        # with self._begin_file_lock:
+        #     if not self.change_current_trg_to_begin():
+        #         return
+        # prompt = self.get_file_content()
+        prompt = self.prompt_content
+        # token count check
+        vertexai = VertexAISingleton.get_instance()
+        token_count = vertexai.count_tokens(prompt)
+        if token_count == 0:
+            super().run()
+            return
+        if token_count > 900000:
+            print(f"警告: promptのトークン数が900000を超えています ({token_count} tokens)")
+            super().run()
+            return
+        # generate with Vertex AI
+        start = time.time()
+        result = vertexai.generate_content(prompt)
+        end = time.time()
+        print(f"Ai 処理時間 {self.file_path}: {end - start:.2f}秒")
+
+        result_content = result.get('response', '')
+        data = json.loads(result_content)
+        with open(self.result_file_path, 'w', encoding='utf-8') as f:
+            json.dump(data, f, ensure_ascii=False, indent=2)
+        super().run()
pilot/job/base/generate/generateTextBaseJob.py ADDED
@@ -0,0 +1,40 @@
+import json
+import os
+import threading
+import time
+
+from pilot.job.impl.base_job import BaseJob
+
+from pilot.generater.vertexai import VertexAISingleton
+
+class textBaseJob(BaseJob):
+
+    prompt_content: str
+    result_content: str
+    result_file_path: str
+
+    def run(self):
+        # with self._begin_file_lock:
+        #     if not self.change_current_trg_to_begin():
+        #         return
+        # prompt = self.get_file_content()
+        prompt = self.prompt_content
+        # token count check
+        vertexai = VertexAISingleton.get_instance()
+        token_count = vertexai.count_tokens(prompt)
+        if token_count == 0:
+            super().run()
+            return
+        if token_count > 900000:
+            print(f"警告: promptのトークン数が900000を超えています ({token_count} tokens)")
+            super().run()
+            return
+        # generate with Vertex AI
+        start = time.time()
+        result = vertexai.generate_content(prompt)
+        end = time.time()
+        print(f"AI 処理時間 {self.file_path}: {end - start:.2f}秒")
+        result_content = result.get('response', '')
+        with open(self.result_file_path, 'w', encoding='utf-8') as f:
+            f.write(result_content)
+        super().run()
pilot/job/impl/base_job.py CHANGED
@@ -67,7 +67,11 @@ class BaseJob(JobInterface):
         self._trg_file_path = value
 
 
+    def prerun(self):
+        pass
 
+    def postrun(self):
+        self.change_current_trg_to_end()
 
     def run(self):
         pass
pilot/unit/impl/base_unit.py CHANGED
@@ -31,6 +31,7 @@ class BaseUnit(UnitInterface):
             job.file_path = file_path
             if self.job_need_run(job, filename, index):
                 job.run()
+                job.postrun()
 
     def job_need_run(self, job: BaseJob, filename: str, index):
         return True
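Taken together with the base_job.py hunk above, jobs gain lifecycle hooks in 0.0.91: BaseUnit now calls postrun() right after run(), and postrun() calls change_current_trg_to_end(); prerun() is added as a no-op and is not yet invoked in these hunks. A schematic sketch of the call order (simplified, not the package's actual implementation):

    class MyJob(BaseJob):              # BaseJob as changed above
        def run(self):
            print("process", self.file_path)
            super().run()

    # inside BaseUnit's loop (simplified):
    #   if self.job_need_run(job, filename, index):
    #       job.run()
    #       job.postrun()              # new in 0.0.91: calls change_current_trg_to_end()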
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pilot.linkstec
-Version: 0.0.32
+Version: 0.0.91
 Summary: pilot of the ship, a tool for managing and deploying Python projects.
 Author-email: wanglr <wanglr1980@gmail.com>
 License-Expression: MIT