pilot.linkstec 0.0.23__tar.gz → 0.0.25__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pilot.linkstec might be problematic; see the advisory below for more details.

Files changed (39)
  1. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/PKG-INFO +1 -1
  2. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/pyproject.toml +1 -1
  3. pilot_linkstec-0.0.25/src/pilot/generater/vertexai.py +72 -0
  4. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot.linkstec.egg-info/PKG-INFO +1 -1
  5. pilot_linkstec-0.0.23/src/pilot/generater/vertexai.py +0 -147
  6. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/LICENSE +0 -0
  7. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/README.md +0 -0
  8. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/setup.cfg +0 -0
  9. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/__init__.py +0 -0
  10. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/config/__init__.py +0 -0
  11. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/config/config_reader.py +0 -0
  12. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/control/__init__.py +0 -0
  13. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/control/control_interface.py +0 -0
  14. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/control/impl/__init__.py +0 -0
  15. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/control/impl/base_controller.py +0 -0
  16. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/conver/__init__.py +0 -0
  17. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/conver/converfileEncodding.py +0 -0
  18. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/conver/nkf_converter.py +0 -0
  19. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/generater/__init__.py +0 -0
  20. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/job/__init__.py +0 -0
  21. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/job/impl/__init__.py +0 -0
  22. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/job/impl/base_job.py +0 -0
  23. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/job/job_interface.py +0 -0
  24. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/logging/__init__.py +0 -0
  25. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/logging/logger.py +0 -0
  26. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/processor/__init__.py +0 -0
  27. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/processor/code_processor.py +0 -0
  28. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/processor/code_processor_pipeline.py +0 -0
  29. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/splitters/__init__.py +0 -0
  30. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/splitters/cobolsplitter.py +0 -0
  31. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/unit/__init__.py +0 -0
  32. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/unit/impl/__init__.py +0 -0
  33. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/unit/impl/base_unit.py +0 -0
  34. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/unit/unit_interface.py +0 -0
  35. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/util/__init__.py +0 -0
  36. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot/util/files.py +0 -0
  37. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot.linkstec.egg-info/SOURCES.txt +0 -0
  38. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot.linkstec.egg-info/dependency_links.txt +0 -0
  39. {pilot_linkstec-0.0.23 → pilot_linkstec-0.0.25}/src/pilot.linkstec.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pilot.linkstec
3
- Version: 0.0.23
3
+ Version: 0.0.25
4
4
  Summary: pilot of the ship, a tool for managing and deploying Python projects.
5
5
  Author-email: wanglr <wanglr1980@gmail.com>
6
6
  License-Expression: MIT
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "pilot.linkstec"
3
- version = "0.0.23"
3
+ version = "0.0.25"
4
4
  authors = [
5
5
  { name="wanglr", email="wanglr1980@gmail.com" },
6
6
  ]
@@ -0,0 +1,72 @@
1
+ import threading
2
+ from typing import Dict, Any, Optional
3
+
4
+ import tiktoken
5
+ from vertexai.generative_models import GenerativeModel, ChatSession
6
+ import os
7
+
8
+ class VertexAISingleton:
9
+ _instance: Optional['VertexAISingleton'] = None
10
+ _lock = threading.Lock()
11
+ _tokenizer_cache = {}
12
+ encoding = None
13
+
14
+ def __new__(cls, model_name: str = "gemini-2.5-pro"):
15
+ if cls._instance is None:
16
+ with cls._lock:
17
+ if cls._instance is None:
18
+ cls._instance = super(VertexAISingleton, cls).__new__(cls)
19
+ cls._instance._initialized = False
20
+ return cls._instance
21
+
22
+ def __init__(self, model_name: str = "gemini-2.5-pro"):
23
+ if not self._initialized:
24
+ with self._lock:
25
+ if not self._initialized:
26
+ self.model = GenerativeModel(model_name)
27
+ self.encoding = tiktoken.get_encoding("cl100k_base")
28
+ self._initialized = True
29
+
30
+ def generate_content(self, prompt: str) -> Dict[str, Any]:
31
+ """複数スレッドから安全に呼び出し可能"""
32
+ try:
33
+ response = self.model.generate_content(prompt)
34
+ return {
35
+ "prompt": prompt,
36
+ "response": self._remove_code_fence(response.text),
37
+ "success": True,
38
+ "error": None
39
+ }
40
+ except Exception as e:
41
+ return {
42
+ "prompt": prompt,
43
+ "response": None,
44
+ "success": False,
45
+ "error": str(e)
46
+ }
47
+
48
+ def start_chat(self) -> ChatSession:
49
+ """新しいチャットセッションを開始"""
50
+ return self.model.start_chat()
51
+
52
+ def count_tokens(self, text: str) -> int:
53
+ """与えられたテキストのトークン数を返す(bert-base-uncasedのみ使用)"""
54
+ try:
55
+ tokens = self.encoding.encode(text)
56
+ return len(tokens)
57
+ except Exception as e:
58
+ print(f"トークン計算失敗: {e}")
59
+ return 0
60
+
61
+ def _remove_code_fence(self, text: str) -> str:
62
+ lines = text.splitlines()
63
+ if lines and lines[0].startswith("```"):
64
+ lines = lines[1:]
65
+ if lines and lines[-1].startswith("```"):
66
+ lines = lines[:-1]
67
+ return "\n".join(lines)
68
+
69
+ @classmethod
70
+ def get_instance(cls, model_name: str = "gemini-2.5-pro") -> 'VertexAISingleton':
71
+ """インスタンスを取得"""
72
+ return cls(model_name)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pilot.linkstec
3
- Version: 0.0.23
3
+ Version: 0.0.25
4
4
  Summary: pilot of the ship, a tool for managing and deploying Python projects.
5
5
  Author-email: wanglr <wanglr1980@gmail.com>
6
6
  License-Expression: MIT
@@ -1,147 +0,0 @@
1
- import re
2
- import threading
3
- from typing import Dict, Any, Optional
4
-
5
- import tiktoken
6
- from vertexai.generative_models import GenerativeModel, ChatSession, GenerationConfig
7
- from google.genai import types
8
-
9
class VertexAISingleton:
    """Thread-safe singleton wrapper around a Vertex AI ``GenerativeModel``
    with COBOL/embedded-SQL prompt preprocessing.

    The first construction builds the model with a system instruction
    (caller prompt plus the SQL guidance suffix) and a tiktoken encoding;
    later constructions return the same instance.
    """

    _instance: Optional['VertexAISingleton'] = None
    _lock = threading.Lock()
    # Retained for backward compatibility with earlier releases; unused here.
    _tokenizer_cache = {}
    # tiktoken encoding; populated on the instance during first __init__.
    encoding = None

    # Appended to every system prompt so the model treats the table in
    # "SELECT COUNT(X) INTO :COLUMN FROM TABLE_NAME;" as a table reference.
    SQL_SYSTEM_PROMPT_EN = (
        "If there is any SQL-related processing \n"
        "1. If there is an SQL statement like \"SELECT COUNT(X) INTO :COLUMN FROM TABLE_NAME;\" please recognize TABLE_NAME as a table."
    )

    def __new__(cls, model_name: str = "gemini-2.5-pro", system_prompt: Optional[str] = None):
        # Double-checked locking: concurrent first calls create one instance.
        if cls._instance is None:
            with cls._lock:
                if cls._instance is None:
                    cls._instance = super().__new__(cls)
                    cls._instance._initialized = False
        return cls._instance

    def __init__(self, model_name: str = "gemini-2.5-pro", system_prompt: Optional[str] = None):
        # Initialize exactly once, even under concurrent construction.
        if not self._initialized:
            with self._lock:
                if not self._initialized:
                    self._model_name = model_name
                    self.system_prompt = self._compose_system_prompt(system_prompt)
                    self.model = GenerativeModel(model_name=model_name, system_instruction=self.system_prompt)
                    self.encoding = tiktoken.get_encoding("cl100k_base")
                    self._initialized = True
        elif system_prompt is not None:
            with self._lock:
                # Allow updating the system prompt on the existing instance.
                # Fix: previously only self.system_prompt was reassigned, so a
                # new prompt never reached the model; rebuild the model so the
                # updated instruction actually takes effect.
                self.system_prompt = self._compose_system_prompt(system_prompt)
                self.model = GenerativeModel(model_name=self._model_name, system_instruction=self.system_prompt)

    def _compose_system_prompt(self, system_prompt: Optional[str]) -> str:
        """Combine a caller-supplied prompt (if non-empty) with the SQL suffix."""
        if system_prompt:
            return f"{system_prompt.rstrip()}\n\n{self.SQL_SYSTEM_PROMPT_EN}"
        return self.SQL_SYSTEM_PROMPT_EN

    def generate_content(self, prompt: str) -> Dict[str, Any]:
        """Preprocess *prompt* and generate a response; thread-safe.

        Returns a dict with keys ``prompt`` (the preprocessed prompt),
        ``response`` (code fences stripped, or ``None`` on failure),
        ``success`` and ``error``. Never raises.
        """
        try:
            prompt = self.exchange_prompt(prompt)
            response = self.model.generate_content(contents=prompt)
            return {
                "prompt": prompt,
                "response": self._remove_code_fence(response.text),
                "success": True,
                "error": None,
            }
        except Exception as e:
            return {
                "prompt": prompt,
                "response": None,
                "success": False,
                "error": str(e),
            }

    def start_chat(self) -> ChatSession:
        """Start and return a new chat session on the shared model."""
        return self.model.start_chat()

    def count_tokens(self, text: str) -> int:
        """Return the approximate token count of *text*.

        Uses tiktoken's ``cl100k_base`` encoding (the previous docstring
        incorrectly claimed bert-base-uncased). Returns 0 on failure.
        """
        try:
            return len(self.encoding.encode(text))
        except Exception as e:
            print(f"トークン計算失敗: {e}")
            return 0

    def _remove_code_fence(self, text: str) -> str:
        """Strip a leading and/or trailing ``` fence line from *text*."""
        lines = text.splitlines()
        if lines and lines[0].startswith("```"):
            lines = lines[1:]
        if lines and lines[-1].startswith("```"):
            lines = lines[:-1]
        return "\n".join(lines)

    def exchange_prompt(self, prompt: str) -> str:
        """Normalize COBOL source in *prompt*: merge INITIALIZE continuation
        lines, rewrite COUNT(*) inside SECTION blocks, and flatten embedded
        EXEC SQL ... END-EXEC blocks onto single lines."""
        rtn_prompt = self.fix_initialize(prompt)
        rtn_prompt = self.extract_and_flatten_sql(rtn_prompt)
        return rtn_prompt

    def fix_initialize(self, text: str) -> str:
        """Within each ``<name> SECTION. ... EXIT.`` block: join INITIALIZE
        statements with their indented continuation lines, and replace
        ``COUNT(*)`` with ``COUNT(1)``."""
        def process_section_block(match):
            section_content = match.group(0)

            # An INITIALIZE line followed by indented continuation lines is
            # merged into a single statement line.
            pattern_init = r'^(\s*INITIALIZE\s+[^\n]*)\n(\s+[^\n]+(?:\s+[^\n]+)*)'

            def repl_init(m):
                init_line = m.group(1).rstrip()
                next_lines = m.group(2).strip()
                return f'{init_line} {next_lines}'

            section_content = re.sub(pattern_init, repl_init, section_content, flags=re.MULTILINE)

            # COUNT(*) → COUNT(1) within the block only.
            section_content = re.sub(r'COUNT\(\s*\*\s*\)', 'COUNT(1)', section_content, flags=re.IGNORECASE)

            return section_content

        # Process each SECTION ... EXIT. block.
        section_pattern = r'(\w+\s+SECTION\s*\..*?EXIT\s*\.)'
        text = re.sub(section_pattern, process_section_block, text, flags=re.DOTALL | re.IGNORECASE)

        return text

    def extract_and_flatten_sql(self, code):
        """Flatten every ``EXEC SQL ... END-EXEC`` block onto one line.

        Comment lines inside the block (lines whose first non-blank character
        is ``*``) are removed. Fix: the previous implementation removed from
        ANY ``*`` to end of line, which mangled legitimate SQL such as
        ``SELECT *`` and ``COUNT(*)``.
        NOTE(review): ``^\\s*\\*`` approximates the COBOL fixed-format
        column-7 comment indicator; an SQL continuation line that happens to
        start with ``*`` would still be dropped — confirm against real input.
        """
        pattern = r"EXEC SQL(.*?)END-EXEC\.?"

        def repl(m):
            raw_sql = m.group(1)
            # Drop whole comment lines only (optional blanks, then '*').
            no_comment = re.sub(r"(?m)^\s*\*.*$", "", raw_sql)
            # Collapse newlines / runs of whitespace to single spaces.
            flattened = re.sub(r"\s+", " ", no_comment).strip()
            # A terminating period is (re)added even if the source omitted it.
            return f"EXEC SQL {flattened} END-EXEC."

        result = re.sub(pattern, repl, code, flags=re.DOTALL | re.IGNORECASE)
        return result

    @classmethod
    def get_instance(cls, model_name: str = "gemini-2.5-pro", system_prompt: Optional[str] = None) -> 'VertexAISingleton':
        """Return the singleton instance (creating it on first call)."""
        return cls(model_name, system_prompt)
File without changes