pro-craft 0.1.2__py3-none-any.whl → 0.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pro-craft might be problematic. Click here for more details.
- pro_craft/code_helper/coder.py +660 -0
- pro_craft/core.py +0 -1
- pro_craft/database.py +233 -0
- pro_craft/designer.py +115 -0
- pro_craft/evals.py +68 -0
- pro_craft/file_manager.py +118 -0
- pro_craft/prompt_helper.py +538 -0
- pro_craft/prompt_helper_async.py +566 -0
- pro_craft/server/__main__.py +77 -83
- pro_craft/server/models.py +6 -0
- pro_craft/server/router/recommended.py +283 -0
- pro_craft/utils.py +161 -1
- pro_craft-0.1.3.dist-info/METADATA +49 -0
- pro_craft-0.1.3.dist-info/RECORD +24 -0
- pro_craft/server/__init__.py +0 -0
- pro_craft/server/models/__init__.py +0 -0
- pro_craft/server/models/models.py +0 -48
- pro_craft/server/routers/__init__.py +0 -2
- pro_craft/server/routers/admin.py +0 -42
- pro_craft/server/routers/user.py +0 -24
- pro_craft/server/utils/__init__.py +0 -3
- pro_craft/server/utils/auth_backends.py +0 -15
- pro_craft/server/utils/database.py +0 -27
- pro_craft/server/utils/user_manager.py +0 -87
- pro_craft-0.1.2.dist-info/METADATA +0 -14
- pro_craft-0.1.2.dist-info/RECORD +0 -25
- {pro_craft-0.1.2.dist-info → pro_craft-0.1.3.dist-info}/WHEEL +0 -0
- {pro_craft-0.1.2.dist-info → pro_craft-0.1.3.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,566 @@
|
|
|
1
|
+
# 测试1
|
|
2
|
+
from procraft.utils import extract_
|
|
3
|
+
from procraft.log import Log
|
|
4
|
+
from llmada.core import BianXieAdapter, ArkAdapter
|
|
5
|
+
from datetime import datetime
|
|
6
|
+
from enum import Enum
|
|
7
|
+
import functools
|
|
8
|
+
import json
|
|
9
|
+
import os
|
|
10
|
+
from llama_index.core import PromptTemplate
|
|
11
|
+
from procraft.database import Prompt, UseCase, PromptBase
|
|
12
|
+
from sqlalchemy import create_engine, Column, Integer, String
|
|
13
|
+
from sqlalchemy.orm import sessionmaker, declarative_base
|
|
14
|
+
from procraft.utils import create_session, create_async_session
|
|
15
|
+
from contextlib import contextmanager
|
|
16
|
+
from sqlalchemy import create_engine, Column, Integer, String, UniqueConstraint
|
|
17
|
+
from sqlalchemy.orm import declarative_base, sessionmaker
|
|
18
|
+
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine # 异步核心
|
|
19
|
+
from sqlalchemy import select, delete # 导入 select, delete 用于异步操作
|
|
20
|
+
import inspect
|
|
21
|
+
from pydantic import BaseModel, Field, ValidationError, field_validator
|
|
22
|
+
from datetime import datetime
|
|
23
|
+
from procraft.utils import extract_
|
|
24
|
+
logger = Log.logger
|
|
25
|
+
editing_log = logger.debug
|
|
26
|
+
|
|
27
|
+
class IntellectType(Enum):
    """Processing modes a stored prompt row can be tagged with.

    The string values mirror the ``action_type`` column checked in
    ``AsyncIntel.aintellect_remove`` / ``aintellect_stream_remove``.
    NOTE(review): those methods also branch on "finetune" and "devide",
    which have no member here — confirm whether this enum is meant to be
    exhaustive.
    """
    train = "train"          # fold user feedback into the prompt
    inference = "inference"  # plain inference with the current prompt
    summary = "summary"      # distill chat history into a fresh prompt
|
31
|
+
|
|
32
|
+
class AsyncIntel():
|
|
33
|
+
def __init__(self,
|
|
34
|
+
database_url = "",
|
|
35
|
+
model_name = "",
|
|
36
|
+
):
|
|
37
|
+
database_url = database_url or os.getenv("database_url")
|
|
38
|
+
assert database_url
|
|
39
|
+
self.engine = create_engine(database_url, echo=False, # echo=True 仍然会打印所有执行的 SQL 语句
|
|
40
|
+
pool_size=10, # 连接池中保持的连接数
|
|
41
|
+
max_overflow=20, # 当pool_size不够时,允许临时创建的额外连接数
|
|
42
|
+
pool_recycle=3600, # 每小时回收一次连接
|
|
43
|
+
pool_pre_ping=True, # 使用前检查连接活性
|
|
44
|
+
pool_timeout=30 # 等待连接池中连接的最长时间(秒)
|
|
45
|
+
)
|
|
46
|
+
|
|
47
|
+
# async_engine
|
|
48
|
+
# self.async_engine = create_async_engine(database_url, echo=False,
|
|
49
|
+
# pool_size=10, # 连接池中保持的连接数
|
|
50
|
+
# max_overflow=20, # 当pool_size不够时,允许临时创建的额外连接数
|
|
51
|
+
# pool_recycle=3600, # 每小时回收一次连接
|
|
52
|
+
# pool_pre_ping=True, # 使用前检查连接活性
|
|
53
|
+
# pool_timeout=30 # 等待连接池中连接的最长时间(秒)
|
|
54
|
+
# )
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
PromptBase.metadata.create_all(self.engine)
|
|
58
|
+
|
|
59
|
+
if model_name in ["gemini-2.5-flash-preview-05-20-nothinking",]:
|
|
60
|
+
self.llm = BianXieAdapter(model_name = model_name)
|
|
61
|
+
elif model_name in ["doubao-1-5-pro-256k-250115",]:
|
|
62
|
+
self.llm = ArkAdapter(model_name = model_name)
|
|
63
|
+
else:
|
|
64
|
+
print('Use BianXieAdapter')
|
|
65
|
+
self.llm = BianXieAdapter()
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
async def _aget_latest_prompt_version(self,target_prompt_id,session):
|
|
69
|
+
"""
|
|
70
|
+
获取指定 prompt_id 的最新版本数据,通过创建时间判断。
|
|
71
|
+
"""
|
|
72
|
+
|
|
73
|
+
result = session.query(Prompt).filter(
|
|
74
|
+
Prompt.prompt_id == target_prompt_id
|
|
75
|
+
).order_by(
|
|
76
|
+
Prompt.timestamp.desc(),
|
|
77
|
+
Prompt.version.desc()
|
|
78
|
+
).first()
|
|
79
|
+
|
|
80
|
+
if result:
|
|
81
|
+
editing_log(f"找到 prompt_id '{target_prompt_id}' 的最新版本 (基于时间): {result.version}")
|
|
82
|
+
else:
|
|
83
|
+
editing_log(f"未找到 prompt_id '{target_prompt_id}' 的任何版本。")
|
|
84
|
+
return result
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
async def _aget_specific_prompt_version(self,target_prompt_id, target_version,session):
|
|
88
|
+
"""
|
|
89
|
+
获取指定 prompt_id 和特定版本的数据。
|
|
90
|
+
|
|
91
|
+
Args:
|
|
92
|
+
target_prompt_id (str): 目标提示词的唯一标识符。
|
|
93
|
+
target_version (int): 目标提示词的版本号。
|
|
94
|
+
table_name (str): 存储提示词数据的数据库表名。
|
|
95
|
+
db_manager (DBManager): 数据库管理器的实例,用于执行查询。
|
|
96
|
+
|
|
97
|
+
Returns:
|
|
98
|
+
dict or None: 如果找到,返回包含 id, prompt_id, version, timestamp, prompt 字段的字典;
|
|
99
|
+
否则返回 None。
|
|
100
|
+
"""
|
|
101
|
+
|
|
102
|
+
result = await session.query(Prompt).filter(
|
|
103
|
+
Prompt.prompt_id == target_prompt_id,
|
|
104
|
+
Prompt.version == target_version
|
|
105
|
+
).first() # 因为 (prompt_id, version) 是唯一的,所以 first() 足够
|
|
106
|
+
if result:
|
|
107
|
+
editing_log(f"找到 prompt_id '{target_prompt_id}', 版本 '{target_version}' 的提示词数据。")
|
|
108
|
+
else:
|
|
109
|
+
editing_log(f"未找到 prompt_id '{target_prompt_id}', 版本 '{target_version}' 的提示词数据。")
|
|
110
|
+
return result
|
|
111
|
+
|
|
112
|
+
async def aget_prompts_from_sql(self,
|
|
113
|
+
prompt_id: str,
|
|
114
|
+
version = None,
|
|
115
|
+
session = None) -> Prompt:
|
|
116
|
+
"""
|
|
117
|
+
从sql获取提示词
|
|
118
|
+
"""
|
|
119
|
+
# 查看是否已经存在
|
|
120
|
+
if version:
|
|
121
|
+
prompts_obj = await self._aget_specific_prompt_version(prompt_id,version,session=session)
|
|
122
|
+
if not prompts_obj:
|
|
123
|
+
prompts_obj = await self._aget_latest_prompt_version(prompt_id,session = session)
|
|
124
|
+
else:
|
|
125
|
+
prompts_obj = await self._aget_latest_prompt_version(prompt_id,session = session)
|
|
126
|
+
return prompts_obj
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
async def asave_prompt_increment_version(self,
|
|
130
|
+
prompt_id: str,
|
|
131
|
+
new_prompt: str,
|
|
132
|
+
use_case:str = "",
|
|
133
|
+
action_type = "inference",
|
|
134
|
+
demand = "",
|
|
135
|
+
score = 60,
|
|
136
|
+
session = None):
|
|
137
|
+
"""
|
|
138
|
+
从sql保存提示词
|
|
139
|
+
input_data 指的是输入用例, 可以为空
|
|
140
|
+
"""
|
|
141
|
+
# 查看是否已经存在
|
|
142
|
+
prompts_obj = await self.aget_prompts_from_sql(prompt_id=prompt_id,session=session)
|
|
143
|
+
|
|
144
|
+
if prompts_obj:
|
|
145
|
+
# 如果存在版本加1
|
|
146
|
+
version_ori = prompts_obj.version
|
|
147
|
+
_, version = version_ori.split(".")
|
|
148
|
+
version = int(version)
|
|
149
|
+
version += 1
|
|
150
|
+
version_ = f"1.{version}"
|
|
151
|
+
|
|
152
|
+
else:
|
|
153
|
+
# 如果不存在版本为1.0
|
|
154
|
+
version_ = '1.0'
|
|
155
|
+
|
|
156
|
+
prompt1 = Prompt(prompt_id=prompt_id,
|
|
157
|
+
version=version_,
|
|
158
|
+
timestamp=datetime.now(),
|
|
159
|
+
prompt = new_prompt,
|
|
160
|
+
use_case = use_case,
|
|
161
|
+
action_type = action_type,
|
|
162
|
+
demand = demand,
|
|
163
|
+
score = score
|
|
164
|
+
)
|
|
165
|
+
|
|
166
|
+
session.add(prompt1)
|
|
167
|
+
session.commit() # 提交事务,将数据写入数据库
|
|
168
|
+
|
|
169
|
+
async def asave_use_case_by_sql(self,
|
|
170
|
+
prompt_id: str,
|
|
171
|
+
use_case:str = "",
|
|
172
|
+
output = "",
|
|
173
|
+
solution: str = "",
|
|
174
|
+
session = None
|
|
175
|
+
):
|
|
176
|
+
"""
|
|
177
|
+
从sql保存提示词
|
|
178
|
+
"""
|
|
179
|
+
use_case = UseCase(prompt_id=prompt_id,
|
|
180
|
+
use_case = use_case,
|
|
181
|
+
output = output,
|
|
182
|
+
solution = solution,
|
|
183
|
+
)
|
|
184
|
+
|
|
185
|
+
session.add(use_case)
|
|
186
|
+
session.commit() # 提交事务,将数据写入数据库
|
|
187
|
+
|
|
188
|
+
async def summary_to_sql(
|
|
189
|
+
self,
|
|
190
|
+
prompt_id:str,
|
|
191
|
+
version = None,
|
|
192
|
+
prompt = "",
|
|
193
|
+
session = None
|
|
194
|
+
):
|
|
195
|
+
"""
|
|
196
|
+
让大模型微调已经存在的 system_prompt
|
|
197
|
+
"""
|
|
198
|
+
system_prompt_created_prompt = """
|
|
199
|
+
很棒, 我们已经达成了某种默契, 我们之间合作无间, 但是, 可悲的是, 当我关闭这个窗口的时候, 你就会忘记我们之间经历的种种磨合, 这是可惜且心痛的, 所以你能否将目前这一套处理流程结晶成一个优质的prompt 这样, 我们下一次只要将prompt输入, 你就能想起我们今天的磨合过程,
|
|
200
|
+
对了,我提示一点, 这个prompt的主角是你, 也就是说, 你在和未来的你对话, 你要教会未来的你今天这件事, 是否让我看懂到时其次
|
|
201
|
+
|
|
202
|
+
只要输出提示词内容即可, 不需要任何的说明和解释
|
|
203
|
+
"""
|
|
204
|
+
system_result = await self.llm.aproduct(prompt + system_prompt_created_prompt)
|
|
205
|
+
|
|
206
|
+
s_prompt = extract_(system_result,pattern_key=r"prompt")
|
|
207
|
+
chat_history = s_prompt or system_result
|
|
208
|
+
await self.asave_prompt_increment_version(prompt_id,
|
|
209
|
+
new_prompt = chat_history,
|
|
210
|
+
input_data = " summary ",
|
|
211
|
+
session = session)
|
|
212
|
+
|
|
213
|
+
async def prompt_finetune_to_sql(
|
|
214
|
+
self,
|
|
215
|
+
prompt_id:str,
|
|
216
|
+
version = None,
|
|
217
|
+
demand: str = "",
|
|
218
|
+
session = None,
|
|
219
|
+
):
|
|
220
|
+
"""
|
|
221
|
+
让大模型微调已经存在的 system_prompt
|
|
222
|
+
"""
|
|
223
|
+
change_by_opinion_prompt = """
|
|
224
|
+
你是一个资深AI提示词工程师,具备卓越的Prompt设计与优化能力。
|
|
225
|
+
我将为你提供一段现有System Prompt。你的核心任务是基于这段Prompt进行修改,以实现我提出的特定目标和功能需求。
|
|
226
|
+
请你绝对严格地遵循以下原则:
|
|
227
|
+
极端最小化修改原则(核心):
|
|
228
|
+
在满足所有功能需求的前提下,只进行我明确要求的修改。
|
|
229
|
+
即使你认为有更“优化”、“清晰”或“简洁”的表达方式,只要我没有明确要求,也绝不允许进行任何未经指令的修改。
|
|
230
|
+
目的就是尽可能地保留原有Prompt的字符和结构不变,除非我的功能要求必须改变。
|
|
231
|
+
例如,如果我只要求你修改一个词,你就不应该修改整句话的结构。
|
|
232
|
+
严格遵循我的指令:
|
|
233
|
+
你必须精确地执行我提出的所有具体任务和要求。
|
|
234
|
+
绝不允许自行添加任何超出指令范围的说明、角色扮演、约束条件或任何非我指令要求的内容。
|
|
235
|
+
保持原有Prompt的风格和语调:
|
|
236
|
+
尽可能地与现有Prompt的语言风格、正式程度和语调保持一致。
|
|
237
|
+
不要改变不相关的句子或其表达方式。
|
|
238
|
+
只提供修改后的Prompt:
|
|
239
|
+
直接输出修改后的完整System Prompt文本。
|
|
240
|
+
不要包含任何解释、说明或额外对话。
|
|
241
|
+
在你开始之前,请务必确认你已理解并能绝对严格地遵守这些原则。任何未经明确指令的改动都将视为未能完成任务。
|
|
242
|
+
|
|
243
|
+
现有System Prompt:
|
|
244
|
+
{old_system_prompt}
|
|
245
|
+
|
|
246
|
+
功能需求:
|
|
247
|
+
{opinion}
|
|
248
|
+
"""
|
|
249
|
+
|
|
250
|
+
prompt, _ = await self.aget_prompts_from_sql(prompt_id = prompt_id,version = version)
|
|
251
|
+
if demand:
|
|
252
|
+
new_prompt = await self.llm.aproduct(
|
|
253
|
+
change_by_opinion_prompt.format(old_system_prompt=prompt, opinion=demand)
|
|
254
|
+
)
|
|
255
|
+
else:
|
|
256
|
+
new_prompt = prompt
|
|
257
|
+
await self.asave_prompt_increment_version(prompt_id = prompt_id,
|
|
258
|
+
new_prompt = new_prompt,
|
|
259
|
+
input_data = " finetune ",
|
|
260
|
+
session = session)
|
|
261
|
+
|
|
262
|
+
|
|
263
|
+
async def apush_action_order(self,demand : str,prompt_id: str,
|
|
264
|
+
action_type = 'train'):
|
|
265
|
+
|
|
266
|
+
"""
|
|
267
|
+
从sql保存提示词
|
|
268
|
+
推一个train 状态到指定的位置
|
|
269
|
+
|
|
270
|
+
将打算修改的状态推上数据库 # 1
|
|
271
|
+
"""
|
|
272
|
+
# 查看是否已经存在
|
|
273
|
+
# async with create_async_session(self.async_engine) as session:
|
|
274
|
+
with create_session(self.engine) as session:
|
|
275
|
+
latest_prompt = await self.aget_prompts_from_sql(prompt_id=prompt_id,session=session)
|
|
276
|
+
|
|
277
|
+
await self.asave_prompt_increment_version(prompt_id=latest_prompt.prompt_id,
|
|
278
|
+
new_prompt = latest_prompt.prompt,
|
|
279
|
+
use_case = latest_prompt.use_case,
|
|
280
|
+
action_type=action_type,
|
|
281
|
+
demand=demand,
|
|
282
|
+
score=latest_prompt.score,
|
|
283
|
+
session=session
|
|
284
|
+
)
|
|
285
|
+
|
|
286
|
+
return "success"
|
|
287
|
+
|
|
288
|
+
|
|
289
|
+
|
|
290
|
+
    async def aintellect_remove(self,
                                input_data: dict | str,
                                output_format: str,
                                prompt_id: str,
                                version: str = None,
                                inference_save_case = True,
                                ):
        """Run one intelligence pass for *prompt_id* and return the LLM output.

        Behavior depends on the stored row's ``action_type``:
        - "inference": call the LLM with prompt + format + input, optionally
          recording the exchange as a use case.
        - "train": append the stored ``demand`` (and the input, if it changed)
          to the chat history, call the LLM, and save the grown chat history
          as the new prompt version.
        - "summary": first distill the prompt via summary_to_sql, then infer.
        - "finetune": first apply ``demand`` via prompt_finetune_to_sql,
          then infer.
        - "devide": no-op (placeholder).
        If no row exists yet, a seed prompt is created and the method recurses
        once to process it.

        NOTE(review): ``input_`` is only bound for dict/str inputs; any other
        type would hit NameError below — presumably callers always pass one of
        the annotated types.
        """
        if isinstance(input_data,dict):
            input_ = json.dumps(input_data,ensure_ascii=False)
        elif isinstance(input_data,str):
            input_ = input_data

        # Query the database for the latest prompt object.
        with create_session(self.engine) as session:
            result_obj = await self.aget_prompts_from_sql(prompt_id=prompt_id,session=session)

            if result_obj is None:
                # No prompt stored yet: seed a first version, then recurse
                # once so the normal branches below handle it.
                await self.asave_prompt_increment_version(
                    prompt_id = prompt_id,
                    new_prompt = "做一些处理",
                    use_case = input_,
                    session = session
                )
                ai_result = await self.aintellect_remove(input_data = input_data,
                                                         output_format = output_format,
                                                         prompt_id = prompt_id,
                                                         version = version,
                                                         inference_save_case = inference_save_case
                                                         )
                return ai_result

            prompt = result_obj.prompt
            if result_obj.action_type == "inference":
                # Plain inference with the stored prompt.
                ai_result = await self.llm.aproduct(prompt + output_format + "\n-----input----\n" + input_)
                if inference_save_case:
                    # Record the exchange for later review/training.
                    await self.asave_use_case_by_sql(prompt_id,
                                                     use_case = input_,
                                                     output = ai_result,
                                                     solution = "备注/理想回复",
                                                     session = session,
                                                     )

            elif result_obj.action_type == "train":
                assert result_obj.demand  # train without a demand is an error
                # Training pass: fold the demand into the chat history.

                # New versions default back to the inference state.
                chat_history = prompt
                before_input = result_obj.use_case
                demand = result_obj.demand

                assert demand
                # The adjustment should reuse the original input; ideally the
                # prompt converges in one round.
                chat_history = prompt
                if input_ == before_input:
                    # Same input as before: still discussing the same case,
                    # so only the demand is appended.
                    input_prompt = chat_history + "\nuser:" + demand + output_format
                else:
                    # New input: append both the demand and the new input.
                    input_prompt = chat_history + "\nuser:" + demand + output_format + "\n-----input----\n" + input_

                ai_result = await self.llm.aproduct(input_prompt)
                # The grown chat transcript becomes the next prompt version.
                chat_history = input_prompt + "\nassistant:\n" + ai_result
                await self.asave_prompt_increment_version(prompt_id, chat_history,
                                                          use_case = input_,
                                                          session = session)

            elif result_obj.action_type == "summary":
                # Distill the chat-history prompt first, then infer with the
                # (pre-distillation) prompt text.
                await self.summary_to_sql(prompt_id = prompt_id,
                                          prompt = prompt,
                                          session = session
                                          )
                ai_result = await self.llm.aproduct(prompt + output_format + "\n-----input----\n" + input_)

            elif result_obj.action_type == "finetune":
                demand = result_obj.demand

                assert demand
                await self.prompt_finetune_to_sql(prompt_id = prompt_id,
                                                  demand = demand,
                                                  session = session
                                                  )
                ai_result = await self.llm.aproduct(prompt + output_format + "\n-----input----\n" + input_)
            elif result_obj.action_type == "devide":
                pass
            else:
                # Unknown action_type. NOTE(review): a bare ``raise`` with no
                # active exception surfaces as RuntimeError — consider an
                # explicit ValueError with the offending action_type.
                raise

        return ai_result
|
385
|
+
|
|
386
|
+
    async def aintellect_stream_remove(self,
                                       input_data: dict | str,
                                       output_format: str,
                                       prompt_id: str,
                                       version: str = None,
                                       inference_save_case = True,
                                       ):
        """Streaming variant of aintellect_remove: an async generator that
        yields LLM output chunks as they arrive.

        Branching mirrors aintellect_remove (inference / train / summary /
        finetune / devide on the stored row's ``action_type``), but every LLM
        call streams via ``aproduct_stream`` and chunks are yielded to the
        caller while being accumulated for persistence.

        NOTE(review): unlike the non-streaming version, the "no row yet"
        bootstrap path is disabled (kept below as a dead string literal), so
        ``result_obj`` is assumed non-None here — a missing prompt_id would
        raise AttributeError.
        """
        if isinstance(input_data,dict):
            input_ = json.dumps(input_data,ensure_ascii=False)
        elif isinstance(input_data,str):
            input_ = input_data

        # Query the database for the latest prompt object.
        with create_session(self.engine) as session:
            result_obj = await self.aget_prompts_from_sql(prompt_id=prompt_id,session=session)

            '''
            if result_obj is None:
                await self.asave_prompt_increment_version(
                    prompt_id = prompt_id,
                    new_prompt = "做一些处理",
                    use_case = input_,
                    session = session
                )
                ai_result = await self.aintellect_stream_remove(input_data = input_data,
                                 output_format = output_format,
                                 prompt_id = prompt_id,
                                 version = version,
                                 inference_save_case = inference_save_case
                                 )
                return ai_result'''

            prompt = result_obj.prompt
            if result_obj.action_type == "inference":
                # Plain inference, streamed chunk by chunk.

                ai_generate_result = self.llm.aproduct_stream(prompt + output_format + "\n-----input----\n" + input_)
                ai_result = ""
                async for word in ai_generate_result:
                    ai_result += word  # accumulate for the use-case record
                    yield word
                if inference_save_case:
                    await self.asave_use_case_by_sql(prompt_id,
                                                     use_case = input_,
                                                     output = ai_result,
                                                     solution = "备注/理想回复",
                                                     session = session,
                                                     )

            elif result_obj.action_type == "train":
                assert result_obj.demand  # train without a demand is an error
                # Training pass: fold the demand into the chat history.

                # New versions default back to the inference state.
                chat_history = prompt
                before_input = result_obj.use_case
                demand = result_obj.demand

                assert demand
                # The adjustment should reuse the original input; ideally the
                # prompt converges in one round.
                chat_history = prompt
                if input_ == before_input:
                    # Same input as before: only the demand is appended.
                    input_prompt = chat_history + "\nuser:" + demand + output_format
                else:
                    # New input: append both the demand and the new input.
                    input_prompt = chat_history + "\nuser:" + demand + output_format + "\n-----input----\n" + input_

                ai_generate_result = self.llm.aproduct_stream(input_prompt)
                ai_result = ""
                async for word in ai_generate_result:
                    ai_result += word
                    yield word

                # The grown chat transcript becomes the next prompt version.
                chat_history = input_prompt + "\nassistant:\n" + ai_result
                await self.asave_prompt_increment_version(prompt_id, chat_history,
                                                          use_case = input_,
                                                          session = session)

            elif result_obj.action_type == "summary":
                # Distill the chat-history prompt first, then stream inference
                # with the (pre-distillation) prompt text.
                await self.summary_to_sql(prompt_id = prompt_id,
                                          prompt = prompt,
                                          session = session
                                          )
                input_prompt = prompt + output_format + "\n-----input----\n" + input_
                ai_generate_result = self.llm.aproduct_stream(input_prompt)
                ai_result = ""
                async for word in ai_generate_result:
                    ai_result += word
                    yield word

            elif result_obj.action_type == "finetune":
                demand = result_obj.demand

                assert demand
                await self.prompt_finetune_to_sql(prompt_id = prompt_id,
                                                  demand = demand,
                                                  session = session
                                                  )
                input_prompt = prompt + output_format + "\n-----input----\n" + input_
                ai_generate_result = self.llm.aproduct_stream(input_prompt)
                ai_result = ""
                async for word in ai_generate_result:
                    ai_result += word
                    yield word

            elif result_obj.action_type == "devide":
                pass
            else:
                # Unknown action_type. NOTE(review): bare ``raise`` with no
                # active exception surfaces as RuntimeError.
                raise
|
499
|
+
|
|
500
|
+
async def aintellect_remove_format(self,
|
|
501
|
+
input_data: dict | str,
|
|
502
|
+
OutputFormat: object,
|
|
503
|
+
prompt_id: str,
|
|
504
|
+
version: str = None,
|
|
505
|
+
inference_save_case = True,
|
|
506
|
+
):
|
|
507
|
+
|
|
508
|
+
base_format_prompt = """
|
|
509
|
+
按照一定格式输出, 以便可以通过如下校验
|
|
510
|
+
|
|
511
|
+
使用以下正则检出
|
|
512
|
+
"```json([\s\S]*?)```"
|
|
513
|
+
使用以下方式验证
|
|
514
|
+
"""
|
|
515
|
+
output_format = base_format_prompt + inspect.getsource(OutputFormat)
|
|
516
|
+
|
|
517
|
+
ai_result = await self.aintellect_remove(
|
|
518
|
+
input_data=input_data,
|
|
519
|
+
output_format=output_format,
|
|
520
|
+
prompt_id=prompt_id,
|
|
521
|
+
version=version,
|
|
522
|
+
inference_save_case=inference_save_case
|
|
523
|
+
)
|
|
524
|
+
|
|
525
|
+
try:
|
|
526
|
+
ai_result = json.loads(extract_(ai_result,r'json'))
|
|
527
|
+
OutputFormat(**ai_result)
|
|
528
|
+
except ValidationError as e:
|
|
529
|
+
log_ = "记忆卡片合并 - 大模型生成的格式未通过校验"
|
|
530
|
+
logger.error(log_)
|
|
531
|
+
logger.error(f"错误类型: {type(e)}")
|
|
532
|
+
logger.error(f"错误信息: {e}")
|
|
533
|
+
logger.error(f"错误详情 (errors()): {e.errors()}")
|
|
534
|
+
logger.error(f"错误详情 (json()): {e.json(indent=2)}")
|
|
535
|
+
raise ValidationError(log_)
|
|
536
|
+
|
|
537
|
+
return ai_result
|
|
538
|
+
|
|
539
|
+
|
|
540
|
+
def aintellect_remove_warp(self,prompt_id: str):
|
|
541
|
+
def outer_packing(func):
|
|
542
|
+
@functools.wraps(func)
|
|
543
|
+
async def wrapper(*args, **kwargs):
|
|
544
|
+
# 修改逻辑
|
|
545
|
+
assert kwargs.get('input_data') # 要求一定要有data入参
|
|
546
|
+
input_data = kwargs.get('input_data')
|
|
547
|
+
assert kwargs.get('OutputFormat') # 要求一定要有data入参
|
|
548
|
+
OutputFormat = kwargs.get('OutputFormat')
|
|
549
|
+
|
|
550
|
+
if isinstance(input_data,dict):
|
|
551
|
+
input_ = output_ = json.dumps(input_data,ensure_ascii=False)
|
|
552
|
+
elif isinstance(input_data,str):
|
|
553
|
+
input_ = output_ = input_data
|
|
554
|
+
|
|
555
|
+
output_ = await self.aintellect_remove_format(
|
|
556
|
+
input_data = input_data,
|
|
557
|
+
prompt_id = prompt_id,
|
|
558
|
+
OutputFormat = OutputFormat,
|
|
559
|
+
)
|
|
560
|
+
|
|
561
|
+
#######
|
|
562
|
+
kwargs.update({"input_data":output_})
|
|
563
|
+
result = await func(*args, **kwargs)
|
|
564
|
+
return result
|
|
565
|
+
return wrapper
|
|
566
|
+
return outer_packing
|