pro-craft 0.1.29__py3-none-any.whl → 0.1.36__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pro_craft/__init__.py +7 -12
- pro_craft/database.py +4 -4
- pro_craft/log.py +14 -14
- pro_craft/prompt_craft/async_.py +304 -369
- pro_craft/prompt_craft/sync.py +2 -3
- {pro_craft-0.1.29.dist-info → pro_craft-0.1.36.dist-info}/METADATA +2 -1
- {pro_craft-0.1.29.dist-info → pro_craft-0.1.36.dist-info}/RECORD +9 -9
- {pro_craft-0.1.29.dist-info → pro_craft-0.1.36.dist-info}/WHEEL +0 -0
- {pro_craft-0.1.29.dist-info → pro_craft-0.1.36.dist-info}/top_level.txt +0 -0
pro_craft/__init__.py
CHANGED
@@ -1,23 +1,18 @@
+
+
+
 from dotenv import load_dotenv, find_dotenv
+
 dotenv_path = find_dotenv()
 load_dotenv(".env", override=True)
 
 from .log import Log
 import logging
-Log_ = Log(console_level = logging.WARNING,
+Log_ = Log(console_level = logging.WARNING, # console display level
           log_file_name="app.log")
 logger = Log_.logger
-Log_.set_super_log(logger.critical)
-
+Log_.set_super_log(logger.critical) # level used by super_log; defaults to the highest and is stored in a separate file
 super_log = Log_.super_log # debugging helper
-
-def slog(s, target: str = "target",logger = logger.info):
-    COLOR_GREEN = "\033[92m"
-    COLOR_RESET = "\033[0m" # reset color
-    logger("\n"+f"{COLOR_GREEN}=={COLOR_RESET}" * 50)
-    logger(target + "\n "+"--" * 40)
-    logger(type(s))
-    logger(s)
-    logger("\n"+f"{COLOR_GREEN}=={COLOR_RESET}" * 50)
+inference_save_case = False
 
 from .prompt_craft import AsyncIntel, Intel, IntelNew
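The new `__init__.py` drops the ad-hoc `slog` helper and instead re-exports `super_log`, the bound method of the package-level `Log_` instance, alongside a module-wide `inference_save_case` flag. A minimal usage sketch, assuming the 0.1.36 layout shown above (the payload dict is illustrative):

    from pro_craft import super_log, logger

    # Emits one consolidated record (separator, type, content) through the
    # configured super-log callable (logger.critical, per the setup above).
    super_log({"user_id": 42}, target="payload")

    # A per-call logger overrides the default level for that one message.
    super_log("hello", target="greeting", logger=logger.info)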
pro_craft/database.py
CHANGED
@@ -179,10 +179,10 @@ class Prompt(PromptBase):
 class UseCase(PromptBase):
     __tablename__ = 'ai_usecase' # table name in the database; change it to whatever you prefer
 
-    __table_args__ = (
-
-
-    )
+    # __table_args__ = (
+    #     UniqueConstraint('prompt_id', 'use_case', name='_prompt_id_version_uc'),
+    #     # the optional 'name' argument labels the constraint in the database, which helps management and debugging
+    # )
 
     # id (int, primary_key=True, autoincrement=True)
     # in your schema, id is int, true (not null), true (primary key), 0 (length), ASC (key order), true (auto increment)
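The uniqueness guard on `ai_usecase` is commented out in 0.1.36, so deduplication moves into application code (see the manual check in the new `save_use_case` further down). For reference, a sketch of the constraint as the commented block would declare it if re-enabled — standard SQLAlchemy, with `PromptBase` assumed to be the package's declarative base:

    from sqlalchemy import UniqueConstraint

    class UseCase(PromptBase):
        __tablename__ = 'ai_usecase'
        # At most one row per (prompt_id, use_case) pair; the optional name
        # labels the constraint in the database for easier management.
        __table_args__ = (
            UniqueConstraint('prompt_id', 'use_case', name='_prompt_id_version_uc'),
        )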
pro_craft/log.py
CHANGED
@@ -12,6 +12,7 @@ class Log:
     def __init__(self, console_level = logging.INFO, log_file_name="app.log"):
         self.Console_LOG_LEVEL = console_level
         self.log_file_name = log_file_name
+        os.makedirs("logs", exist_ok=False)
         self.LOG_FILE_PATH = os.path.join("logs", log_file_name)
         self.logger = self.get_logger()
         self.super_log_level = self.logger.critical
@@ -60,23 +61,22 @@ class Log:
         )
         file_handler_debug.setLevel(logging.WARNING) # the file records every log at or above the configured level
         file_handler_debug.setFormatter(formatter)
-
+
+        file_handler_cri = RotatingFileHandler(
+            self.LOG_FILE_PATH.replace('.log','_slog.log'),
+            maxBytes=5 * 1024 * 1024, # 10 MB
+            backupCount=5,
+            encoding="utf-8",
+        )
+        file_handler_cri.setLevel(logging.CRITICAL) # the file records every log at or above the configured level
+        file_handler_cri.setFormatter(formatter)
+        logger.addHandler(file_handler_cri)
         return logger
 
     def set_super_log(self,logger_info):
         self.super_log_level = logger_info
 
-    def super_log(self,s, target: str = "target"):
-
-
-        COLOR_YELLOW = "\033[93m"
-        COLOR_BLUE = "\033[94m"
-        COLOR_RESET = "\033[0m" # reset color
-        log_ = self.super_log_level
-
-        log_("\n"+f"{COLOR_GREEN}=={COLOR_RESET}" * 50)
-        log_(target + "\n "+"--" * 40)
-        log_(type(s))
-        log_(s)
-        log_("\n"+f"{COLOR_GREEN}=={COLOR_RESET}" * 50)
+    def super_log(self,s, target: str = "target",logger = None):
+        logger = logger or self.super_log_level
+        logger("\n" + "=="*25 + target +"=="*25 + f"\n type: {str(type(s))}" + f"\ncontent: {s}")
 
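Besides the one-line `super_log` rewrite (ANSI color codes gone, one formatted message instead of five calls), the notable change is a second rotating handler that siphons CRITICAL records into a `*_slog.log` companion file. A standalone sketch of the same two-handler pattern, standard library only:

    import logging
    import os
    from logging.handlers import RotatingFileHandler

    os.makedirs("logs", exist_ok=True)  # unlike the diff above, tolerate an existing dir

    logger = logging.getLogger("app")
    logger.setLevel(logging.DEBUG)
    fmt = logging.Formatter("%(asctime)s %(levelname)s %(message)s")

    # Main file: WARNING and above, rotated at 5 MB, keeping 5 backups.
    app_handler = RotatingFileHandler("logs/app.log", maxBytes=5 * 1024 * 1024,
                                      backupCount=5, encoding="utf-8")
    app_handler.setLevel(logging.WARNING)
    app_handler.setFormatter(fmt)

    # Side channel: CRITICAL only, written to a separate _slog file.
    slog_handler = RotatingFileHandler("logs/app_slog.log", maxBytes=5 * 1024 * 1024,
                                       backupCount=5, encoding="utf-8")
    slog_handler.setLevel(logging.CRITICAL)
    slog_handler.setFormatter(fmt)

    logger.addHandler(app_handler)
    logger.addHandler(slog_handler)

Note that the diff itself calls `os.makedirs("logs", exist_ok=False)`, which raises FileExistsError once the directory already exists.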
pro_craft/prompt_craft/async_.py
CHANGED
@@ -25,26 +25,14 @@ from datetime import datetime, timedelta
 from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine, async_sessionmaker
 from sqlalchemy import select, and_ # import select and and_
 from sqlalchemy.orm import class_mapper # used to check whether an object is persistent
+import tqdm
+from tqdm.asyncio import tqdm
+import pandas as pd
+import plotly.graph_objects as go
+from pro_craft import super_log
 
+BATCH_SIZE = int(os.getenv("DATABASE_SYNC_BATCH_SIZE",100))
 
-class IntellectRemoveFormatError(Exception):
-    pass
-
-class IntellectRemoveError(Exception):
-    pass
-
-BATCH_SIZE = 100
-MIN_SUCCESS_RATE = 00.0 # the pass threshold: results above this rate count as passing
-
-
-def slog(s, target: str = "target",logger = None):
-    COLOR_GREEN = "\033[92m"
-    COLOR_RESET = "\033[0m" # reset color
-    logger("\n"+f"{COLOR_GREEN}=={COLOR_RESET}" * 50)
-    logger(target + "\n "+"--" * 40)
-    logger(type(s))
-    logger(s)
-    logger("\n"+f"{COLOR_GREEN}=={COLOR_RESET}" * 50)
 
 def fix_broken_json_string(broken_json_str):
     # strip the BOM
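Two details in the new import block are easy to miss: `from tqdm.asyncio import tqdm` rebinds the name `tqdm` from the module to the asyncio-aware class (which is what makes the later `tqdm.gather(...)` call work), and the sync batch size is now environment-configurable. A minimal sketch of the latter:

    import os

    # os.getenv returns a string when the variable is set, so the int() cast
    # is what turns e.g. DATABASE_SYNC_BATCH_SIZE=250 into a usable number;
    # the literal 100 is the fallback when the variable is unset.
    BATCH_SIZE = int(os.getenv("DATABASE_SYNC_BATCH_SIZE", 100))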
@@ -71,27 +59,6 @@ def fix_broken_json_string(broken_json_str):
 
     return fixed_json_str
 
-
-# def get_last_sync_time(target_session) -> datetime:
-#     """Fetch the last sync time from the target database"""
-#     metadata_entry = target_session.query(SyncMetadata).filter_by(table_name="sync_metadata").first()
-#     if metadata_entry:
-#         return metadata_entry.last_sync_time
-#     return datetime(1970, 1, 1) # default to a very early time
-
-# def update_last_sync_time(target_session, new_sync_time: datetime):
-#     """Update the last sync time in the target database"""
-#     metadata_entry = target_session.query(SyncMetadata).filter_by(table_name="sync_metadata").first()
-#     if metadata_entry:
-#         metadata_entry.last_sync_time = new_sync_time
-#     else:
-#         # create it if it does not exist
-#         new_metadata = SyncMetadata(table_name="sync_metadata", last_sync_time=new_sync_time)
-#         target_session.add(new_metadata)
-#     target_session.commit()
-#     print(f"Updated last sync time to: {new_sync_time}")
-
-
 async def get_last_sync_time(target_session: AsyncSession) -> datetime:
     """Fetch the last sync time from the target database"""
     # fix: use select() and execute()
@@ -104,10 +71,6 @@ async def get_last_sync_time(target_session: AsyncSession) -> datetime:
         return metadata_entry.last_sync_time
     return datetime(1970, 1, 1) # default to a very early time
 
-
-# from your_module import SyncMetadata # assumes SyncMetadata is already imported
-# from sqlalchemy import select # make sure select is imported
-
 async def update_last_sync_time(target_session: AsyncSession, new_sync_time: datetime):
     """Update the last sync time in the target database"""
     # fix: use select() and execute()
@@ -127,8 +90,14 @@ async def update_last_sync_time(target_session: AsyncSession, new_sync_time: datetime):
     await target_session.commit() # TODO
     print(f"Updated last sync time to: {new_sync_time}")
 
+class IntellectRemoveFormatError(Exception):
+    pass
 
+class IntellectRemoveError(Exception):
+    pass
 
+class ModelNameError(Exception):
+    pass
 
 
 
@@ -149,7 +118,7 @@ class AsyncIntel():
             assert database_url
             assert 'aio' in database_url
         except AssertionError as e:
-
+            super_log(database_url,'database_url',logger=self.logger.warning)
            raise IntellectRemoveFormatError(f"an async service url must be provided, and it must be an aiomysql configuration") from e
 
         self.engine = create_async_engine(database_url, echo=False,
@@ -160,15 +129,16 @@ class AsyncIntel():
                 pool_timeout=30 # maximum time (seconds) to wait for a connection from the pool
         )
 
-        if
+        if "gemini" in model_name:
             self.llm = BianXieAdapter(model_name = model_name)
-        elif
+        elif "doubao" in model_name:
             self.llm = ArkAdapter(model_name = model_name)
         else:
-
-
+            raise ModelNameError("AsyncIntel init get error model_name from zxf")
+
+        self.eval_df = pd.DataFrame({"name":[],'status':[],"score":[],"total":[],"bad_case":[]})
 
-    async def
+    async def create_main_database(self):
         tables_to_create_names = ["ai_prompts","ai_usecase"]
         async with self.engine.begin() as conn:
             # fetch the corresponding Table objects from metadata
@@ -187,51 +157,31 @@ class AsyncIntel():
     async def create_database(self,engine):
         async with engine.begin() as conn:
             await conn.run_sync(PromptBase.metadata.create_all)
-
-    async def
+
+    async def get_prompt(self,prompt_id,version,session):
         """
        Fetch the newest version of the data for the given prompt_id, decided by creation time.
         """
-
-        Prompt.
-
-
-
-
+        if version:
+            stmt_ = select(Prompt).filter(
+                Prompt.prompt_id == prompt_id,
+                Prompt.version == version
+            )
+        else:
+            stmt_ = select(Prompt).filter(
+                Prompt.prompt_id == prompt_id,
+            )
+        stmt = stmt_.order_by(
+            desc(Prompt.timestamp), # sqlalchemy.desc() gives descending order
+            desc(Prompt.version) # sqlalchemy.desc() gives descending order
+        )
+
         result = await session.execute(stmt)
-        # 3. take the first model instance from the Result object
-        # .scalars() extracts the first column of each row (here the Prompt object itself)
-        # .first() returns the first result
         result = result.scalars().first()
 
         return result
 
-    async def
-        """
-        Fetch the data for a given prompt_id and a specific version.
-
-        Args:
-            target_prompt_id (str): unique identifier of the target prompt.
-            target_version (int): version number of the target prompt.
-            table_name (str): name of the database table storing prompt data.
-            db_manager (DBManager): database manager instance used to run the query.
-
-        Returns:
-            dict or None: a dict with id, prompt_id, version, timestamp and prompt fields if found;
-            otherwise None.
-        """
-        stmt = select(Prompt).filter(
-            Prompt.prompt_id == target_prompt_id,
-            Prompt.version == target_version
-        )
-        result = await session.execute(stmt)
-
-        specific_prompt = result.scalars().one_or_none()
-
-        return specific_prompt
-
-    async def sync_prompt_data_to_database(self,database_url:str):
+    async def sync_production_database(self,database_url:str):
         target_engine = create_async_engine(database_url, echo=False)
         await self.create_database(target_engine)
         async with create_async_session(self.engine) as source_session:
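The consolidated `get_prompt` resolves "latest" by sorting on timestamp, then version, both descending. A minimal async sketch of the same query shape (`Prompt` stands in for the package's model; the session is assumed to be an SQLAlchemy `AsyncSession`):

    from sqlalchemy import select, desc

    async def latest_prompt(session, prompt_id: str, version=None):
        stmt = select(Prompt).filter(Prompt.prompt_id == prompt_id)
        if version:
            stmt = stmt.filter(Prompt.version == version)
        # Newest first: creation time, then version number, both descending.
        stmt = stmt.order_by(desc(Prompt.timestamp), desc(Prompt.version))
        result = await session.execute(stmt)
        return result.scalars().first()  # first matching row, or None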
@@ -325,24 +275,23 @@ class AsyncIntel():
             print("No new records to sync.")
 
 
-    async def
+    async def get_prompt_safe(self,
                                   prompt_id: str,
                                   version = None,
                                   session = None) -> Prompt:
         """
        Fetch a prompt from SQL
         """
-
-        if
-
-
-
-
-
+        prompt_obj = await self.get_prompt(prompt_id=prompt_id,version=version,session=session)
+        if prompt_obj:
+            return prompt_obj
+
+        print("warning: the requested version was not found; falling back to the latest version")
+        prompt_obj = await self.get_prompt(prompt_id=prompt_id,version=None,session=session)
+        return prompt_obj
+
 
-
-    async def save_prompt_increment_version(self,
+    async def save_prompt(self,
                            prompt_id: str,
                            new_prompt: str,
                            use_case:str = "",
@@ -355,7 +304,7 @@ class AsyncIntel():
        input_data is the input use case; it may be empty
         """
         # check whether it already exists
-        prompts_obj = await self.
+        prompts_obj = await self.get_prompt_safe(prompt_id=prompt_id,session=session)
 
         if prompts_obj:
             # if it exists, bump the version by 1
@@ -382,7 +331,7 @@ class AsyncIntel():
         session.add(prompt1)
         await session.commit() # commit the transaction and write the data to the database
 
-    async def
+    async def get_use_case(self,
                            target_prompt_id: str,
                            session = None
                            ):
@@ -397,107 +346,40 @@ class AsyncIntel():
         use_case = result.scalars().all()
         return use_case
 
-    async def
+    async def save_use_case(self,
                            prompt_id: str,
                            use_case:str = "",
+                           timestamp = "",
                            output = "",
                            solution: str = "",
+                           faired_time = 0,
                            session = None
                            ):
+
         """
        Save a prompt to SQL
         """
         #TODO before saving, make sure the same prompt_id has no duplicate use_case in the database
-
-
+        use_cases = await self.get_use_case(target_prompt_id = prompt_id,
+                                            session = session)
+        for use_case_old in use_cases:
+            if use_case == use_case_old.use_case:
+                print("use case already exists")
+                return
+
+        use_case = UseCase(prompt_id=prompt_id,
                            use_case = use_case,
+                           timestamp = timestamp,
                            output = output,
                            solution = solution,
+                           faired_time = faired_time,
                            )
 
         session.add(use_case)
         await session.commit() # commit the transaction and write the data to the database
 
-    async def summary_to_sql(
-        self,
-        prompt_id:str,
-        version = None,
-        prompt = "",
-        session = None
-    ):
-        """
-        Have the LLM fine-tune an existing system_prompt
-        """
-        system_prompt_created_prompt = """
-        Great, we have reached a real rapport and our collaboration is seamless. Sadly, when I close this window you will forget all the run-in we have been through, which is a pity and heartbreaking. So could you crystallize this whole workflow into a high-quality prompt, so that next time we only need to feed that prompt in and you will remember today's process,
-        One hint: the protagonist of this prompt is you. In other words, you are talking to your future self, and you must teach your future self today's lesson; whether I can follow it is secondary
-
-        Output only the prompt content, with no explanation or commentary
-        """
-        system_result = await self.llm.aproduct(prompt + system_prompt_created_prompt)
-
-        s_prompt = extract_(system_result,pattern_key=r"prompt")
-        chat_history = s_prompt or system_result
-        await self.save_prompt_increment_version(prompt_id,
-                                new_prompt = chat_history,
-                                use_case = "",
-                                score = 60,
-                                session = session)
-
-    async def prompt_finetune_to_sql(
-        self,
-        prompt_id:str,
-        version = None,
-        demand: str = "",
-        session = None,
-    ):
-        """
-        Have the LLM fine-tune an existing system_prompt
-        """
-        change_by_opinion_prompt = """
-        You are a senior AI prompt engineer with outstanding prompt design and optimization skills.
-        I will give you an existing System Prompt. Your core task is to modify it so that it achieves the specific goals and functional requirements I state.
-        Follow these principles with absolute strictness:
-        Extreme minimal-modification principle (core):
-        While satisfying all functional requirements, make only the changes I explicitly request.
-        Even if you believe there is a more "optimized", "clearer" or "more concise" wording, you must never make any uninstructed change unless I explicitly ask for it.
-        The goal is to keep the original prompt's characters and structure unchanged wherever possible, unless my functional requirements force a change.
-        For example, if I only ask you to change one word, you must not restructure the whole sentence.
-        Strictly follow my instructions:
-        You must execute every specific task and requirement I state, precisely.
-        Never add, on your own initiative, any explanation, role-play, constraint, or anything else beyond the scope of my instructions.
-        Preserve the original prompt's style and tone:
-        Stay as consistent as possible with the existing prompt's language style, formality and tone.
-        Do not change unrelated sentences or their phrasing.
-        Provide only the modified prompt:
-        Output the complete modified System Prompt text directly.
-        Do not include any explanation, commentary or extra dialogue.
-        Before you begin, confirm that you have understood these principles and can obey them with absolute strictness. Any change not explicitly instructed will be treated as a failure to complete the task.
-
-        Existing System Prompt:
-        {old_system_prompt}
-
-        Functional requirements:
-        {opinion}
-        """
-
-        prompt_ = await self.get_prompts_from_sql(prompt_id = prompt_id,version = version,
-                                                    session=session)
-        if demand:
-            new_prompt = await self.llm.aproduct(
-                change_by_opinion_prompt.format(old_system_prompt=prompt_.prompt, opinion=demand)
-            )
-        else:
-            new_prompt = prompt_
-        await self.save_prompt_increment_version(prompt_id = prompt_id,
-                                new_prompt = new_prompt,
-                                use_case = "",
-                                score = 60,
-                                session = session)
-
-
     async def push_action_order(self,demand : str,prompt_id: str,
-                                action_type = 'train')
+                                action_type = 'train'):# init
 
         """
        Save a prompt to SQL
@@ -508,9 +390,9 @@ class AsyncIntel():
         # check whether it already exists
         async with create_async_session(self.engine) as session:
 
-            latest_prompt = await self.
+            latest_prompt = await self.get_prompt_safe(prompt_id=prompt_id,session=session)
             if latest_prompt:
-                await self.
+                await self.save_prompt(prompt_id=latest_prompt.prompt_id,
                                        new_prompt = latest_prompt.prompt,
                                        use_case = latest_prompt.use_case,
                                        action_type=action_type,
@@ -520,9 +402,9 @@ class AsyncIntel():
                                        )
                 return "success"
             else:
-                await self.
+                await self.save_prompt(prompt_id=prompt_id,
                                        new_prompt = demand,
-                                       use_case = "
+                                       use_case = "",
                                        action_type="inference",
                                        demand=demand,
                                        score=60,
@@ -531,8 +413,7 @@ class AsyncIntel():
                 return "init"
 
 
-
-    async def intellect_remove(self,
+    async def intellect(self,
                         input_data: dict | str,
                         output_format: str,
                         prompt_id: str,
@@ -547,7 +428,7 @@ class AsyncIntel():
 
         # query the database for the latest prompt object
         async with create_async_session(self.engine) as session:
-            result_obj = await self.
+            result_obj = await self.get_prompt_safe(prompt_id=prompt_id,session=session)
             if result_obj is None:
                raise IntellectRemoveError("nonexistent prompt_id")
 
@@ -556,10 +437,13 @@ class AsyncIntel():
                 # just run inference directly
                 ai_result = await self.llm.aproduct(prompt + output_format + "\nuser:" + input_)
                 if inference_save_case:
-
+                    # design a mechanism: if asked to fetch, say, 300 records, keep the data flowing until 300 is reached
+                    await self.save_use_case(prompt_id,
                                             use_case = input_,
+                                             timestamp = datetime.now(),
                                             output = ai_result,
-                                             solution =
+                                             solution = output_format,
+                                             faired_time = 0,
                                              session = session,
                                              )
 
@@ -568,9 +452,6 @@ class AsyncIntel():
                 # then train and promote
 
                 # a new version defaults back to the inference state
-                chat_history = prompt
-                before_input = result_obj.use_case
-                demand = result_obj.demand
 
 
                 # assert demand
@@ -585,7 +466,7 @@ class AsyncIntel():
 
                 # ai_result = await self.llm.aproduct(input_prompt)
                 # chat_history = input_prompt + "\nassistant:\n" + ai_result # use the chat history as the full prompt
-                # await self.
+                # await self.save_prompt(prompt_id, chat_history,
                 #                     use_case = input_,
                 #                     score = 60,
                 #                     session = session)
@@ -603,14 +484,17 @@ class AsyncIntel():
                 # save_new_prompt = new_prompt + "\nassistant:\n" + ai_result
 
 
-                # await self.
+                # await self.save_prompt(
                 #     prompt_id,
                 #     new_prompt=save_new_prompt,
                 #     use_case = input_,
                 #     action_type = "inference",
                 #     score = 60,
                 #     session = session)
-
+                chat_history = prompt
+                before_input = result_obj.use_case
+                demand = result_obj.demand
+                input_data = input_
                 if before_input == "" or change_case is True:
                     result_obj.use_case = input_
                     await session.commit()
@@ -625,7 +509,7 @@ class AsyncIntel():
                 save_new_prompt = new_prompt + "\nassistant:\n" + ai_result
 
 
-                await self.
+                await self.save_prompt(
                     prompt_id,
                     new_prompt=save_new_prompt,
                     use_case = input_,
@@ -634,172 +518,118 @@ class AsyncIntel():
                     session = session)
 
             elif result_obj.action_type == "summary":
+                system_prompt_summary = """
+                Great, we have reached a real rapport and our collaboration is seamless. Sadly, when I close this window you will forget all the run-in we have been through, which is a pity and heartbreaking. So could you crystallize this whole workflow into a high-quality prompt, so that next time we only need to feed that prompt in and you will remember today's process,
+                One hint: the protagonist of this prompt is you. In other words, you are talking to your future self, and you must teach your future self today's lesson; whether I can follow it is secondary
+
+                Output only the prompt content, with no explanation or commentary
+                """
+
+                latest_prompt = await self.get_prompt_safe(prompt_id=prompt_id,session=session)
+
+                system_result = await self.llm.aproduct(prompt + system_prompt_summary)
+                s_prompt = extract_(system_result,pattern_key=r"prompt")
+                new_prompt = s_prompt or system_result
+                await self.save_prompt(
+                    prompt_id,
+                    new_prompt = new_prompt,
+                    use_case = latest_prompt.use_case,
+                    score = 65,
+                    action_type = "inference",
+                    session = session
+                )
 
-                await self.summary_to_sql(prompt_id = prompt_id,
-                                  prompt = prompt,
-                                  session = session
-                                  )
                 ai_result = await self.llm.aproduct(prompt + output_format + "\nuser:" + input_)
 
             elif result_obj.action_type == "finetune":
                 demand = result_obj.demand
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                raise
-
-                return ai_result
-
-    async def intellect_stream_remove(self,
-                        input_data: dict | str,
-                        output_format: str,
-                        prompt_id: str,
-                        version: str = None,
-                        inference_save_case = True,
-                        push_patch = False,
-                        ):
-        if isinstance(input_data,dict):
-            input_ = json.dumps(input_data,ensure_ascii=False)
-        elif isinstance(input_data,str):
-            input_ = input_data
-
-
-        # query the database for the latest prompt object
-        with create_session(self.engine) as session:
-            result_obj = await self.get_prompts_from_sql(prompt_id=prompt_id,session=session)
-
-            '''
-            if result_obj is None:
-                await self.save_prompt_increment_version(
-                    prompt_id = prompt_id,
-                    new_prompt = "do some processing",
-                    use_case = input_,
-                    session = session
-                )
-                ai_result = await self.intellect_stream_remove(input_data = input_data,
-                              output_format = output_format,
-                              prompt_id = prompt_id,
-                              version = version,
-                              inference_save_case = inference_save_case
-                              )
-                return ai_result'''
+                change_by_opinion_prompt = """
+                You are a senior AI prompt engineer with outstanding prompt design and optimization skills.
+                I will give you an existing System Prompt. Your core task is to modify it so that it achieves the specific goals and functional requirements I state.
+                Follow these principles with absolute strictness:
+                Extreme minimal-modification principle (core):
+                While satisfying all functional requirements, make only the changes I explicitly request.
+                Even if you believe there is a more "optimized", "clearer" or "more concise" wording, you must never make any uninstructed change unless I explicitly ask for it.
+                The goal is to keep the original prompt's characters and structure unchanged wherever possible, unless my functional requirements force a change.
+                For example, if I only ask you to change one word, you must not restructure the whole sentence.
+                Strictly follow my instructions:
+                You must execute every specific task and requirement I state, precisely.
+                Never add, on your own initiative, any explanation, role-play, constraint, or anything else beyond the scope of my instructions.
+                Preserve the original prompt's style and tone:
+                Stay as consistent as possible with the existing prompt's language style, formality and tone.
+                Do not change unrelated sentences or their phrasing.
+                Provide only the modified prompt:
+                Output the complete modified System Prompt text directly.
+                Do not include any explanation, commentary or extra dialogue.
+                Before you begin, confirm that you have understood these principles and can obey them with absolute strictness. Any change not explicitly instructed will be treated as a failure to complete the task.
 
-
-                # just run inference directly
-
-                ai_generate_result = self.llm.aproduct_stream(prompt + output_format + "\n-----input----\n" + input_)
-                ai_result = ""
-                async for word in ai_generate_result:
-                    ai_result += word
-                    yield word
-                if inference_save_case:
-                    await self.save_use_case_by_sql(prompt_id,
-                                             use_case = input_,
-                                             output = ai_result,
-                                             solution = "notes / ideal reply",
-                                             session = session,
-                                             )
-
-            elif result_obj.action_type == "train":
-                assert result_obj.demand # raise if type = train while demand is empty
-                # then train and promote
+                Existing System Prompt:
+                {old_system_prompt}
 
-
-
-
-                demand = result_obj.demand
-
+                Functional requirements:
+                {opinion}
+                """
 
+                latest_prompt = await self.get_prompt_safe(prompt_id=prompt_id,session=session)
+                prompt_ = await self.get_prompt_safe(prompt_id = prompt_id,version = version,
+                                                            session=session)
                 assert demand
-
-
-
-
-
+
+                if demand:
+                    new_prompt = await self.llm.aproduct(
+                        change_by_opinion_prompt.format(old_system_prompt=prompt_.prompt, opinion=demand)
+                    )
                 else:
-
-
-
-
-
-
-
-
-
-                chat_history = input_prompt + "\nassistant:\n" + ai_result # use the chat history as the full prompt
-                await self.save_prompt_increment_version(prompt_id, chat_history,
-                                        use_case = input_,
-                                        score = 60,
-                                        session = session)
-
-
-            elif result_obj.action_type == "summary":
-
-                await self.summary_to_sql(prompt_id = prompt_id,
-                                  prompt = prompt,
-                                  session = session
-                                  )
-                input_prompt = prompt + output_format + "\n-----input----\n" + input_
-                ai_generate_result = self.llm.aproduct_stream(input_prompt)
-                ai_result = ""
-                async for word in ai_generate_result:
-                    ai_result += word
-                    yield word
+                    new_prompt = prompt_
+                await self.save_prompt(
+                    prompt_id,
+                    new_prompt = new_prompt,
+                    use_case = latest_prompt.use_case,
+                    score = 70,
+                    action_type = "inference",
+                    session = session
+                )
 
-
-                demand = result_obj.demand
-
-                assert demand
-                await self.prompt_finetune_to_sql(prompt_id = prompt_id,
-                                          demand = demand,
-                                          session = session
-                                          )
-                input_prompt = prompt + output_format + "\n-----input----\n" + input_
-                ai_generate_result = self.llm.aproduct_stream(input_prompt)
-                ai_result = ""
-                async for word in ai_generate_result:
-                    ai_result += word
-                    yield word
+                ai_result = await self.llm.aproduct(prompt + output_format + "\nuser:" + input_)
 
             elif result_obj.action_type == "patch":
-
                 demand = result_obj.demand
                 assert demand
-
+                latest_prompt = await self.get_prompt_safe(prompt_id=prompt_id,session=session)
+
                 chat_history = prompt + demand
-
-
-
-
-
-
-
-
-
-
+                await self.save_prompt(prompt_id,
+                                       chat_history,
+                                       use_case = latest_prompt.use_case,
+                                       score = 70,
+                                       action_type = "inference",
+                                       session = session)
+
+                ai_result = await self.llm.aproduct(chat_history + output_format + "\nuser:" + input_)
+
+            elif result_obj.action_type.startswith("to:"):
+                target_version = result_obj.action_type.split(":")[-1]
+                latest_prompt = await self.get_prompt_safe(prompt_id=prompt_id,session=session)
+                prompt_obj = await self.get_prompt_safe(prompt_id=prompt_id,
+                                                        version=target_version,
+                                                        session=session)
+
+                await self.save_prompt(prompt_id,
+                                       prompt_obj.prompt,
+                                       use_case = latest_prompt.use_case,
+                                       score = prompt_obj.score,
+                                       action_type = "inference",
+                                       session = session)
+                ai_result = await self.llm.aproduct(prompt_obj.prompt + output_format + "\nuser:" + input_)
+
             else:
                 raise
 
-
+            return ai_result
+
+    async def intellect_format(self,
                         input_data: dict | str,
-                        OutputFormat: object,
+                        OutputFormat: object | None,
                         prompt_id: str,
                         ExtraFormats: list[object] = [],
                         version: str = None,
@@ -813,9 +643,11 @@ class AsyncIntel():
        "```json([\s\S]*?)```"
        Validate it as follows
         """
-
-
-
+        if OutputFormat:
+            output_format = base_format_prompt + "\n".join([inspect.getsource(outputformat) for outputformat in ExtraFormats]) + inspect.getsource(OutputFormat)
+        else:
+            output_format = ""
+        ai_result = await self.intellect(
             input_data=input_data,
             output_format=output_format,
             prompt_id=prompt_id,
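`intellect_format` now accepts `OutputFormat=None` (skipping format injection entirely); otherwise it pastes the literal source of each format class into the prompt via `inspect.getsource`. A small sketch of that mechanism (the `Reply` class is hypothetical):

    import inspect

    class Reply:
        """Expected JSON shape of the model's answer."""
        def __init__(self, title: str, score: int):
            self.title = title
            self.score = score

    # inspect.getsource returns the class definition verbatim, ready to be
    # embedded in a prompt so the LLM sees the exact fields it must emit.
    print(inspect.getsource(Reply))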
@@ -825,17 +657,17 @@ class AsyncIntel():
 
         try:
             json_str = extract_(ai_result,r'json')
-            # json_str = fix_broken_json_string(json_str)
             ai_result = json.loads(json_str)
-            OutputFormat
+            if OutputFormat:
+                OutputFormat(**ai_result)
 
         except JSONDecodeError as e:
-            slog(ai_result,logger=self.logger.error)
             try:
                self.logger.error(f"attempting a repair")
                 json_str = fix_broken_json_string(json_str)
                 ai_result = json.loads(json_str)
-                OutputFormat
+                if OutputFormat:
+                    OutputFormat(**ai_result)
 
             except JSONDecodeError as e:
                raise IntellectRemoveFormatError(f"prompt_id: {prompt_id} produced content that cannot be parsed as JSON {e}") from e
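Validation is now by construction: when a format class is supplied, `OutputFormat(**parsed)` is called so that a field mismatch raises immediately, with one string-repair attempt before giving up. A condensed, self-contained sketch of that extract/parse/validate flow (the regex is the one the docstring above documents; the package's `fix_broken_json_string` repair step is omitted here):

    import json
    import re

    def extract_json_block(text: str) -> str:
        # Pull the body of a fenced ```json ... ``` block, per the documented pattern.
        m = re.search(r"```json([\s\S]*?)```", text)
        return m.group(1) if m else text

    def parse_reply(ai_result: str, OutputFormat=None) -> dict:
        json_str = extract_json_block(ai_result)
        parsed = json.loads(json_str)    # raises json.JSONDecodeError on bad output
        if OutputFormat:
            OutputFormat(**parsed)       # the constructor doubles as a field check
        return parsed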
@@ -849,7 +681,7 @@ class AsyncIntel():
 
         return ai_result
 
-    async def
+    async def intellect_formats(self,
                         input_datas: list[dict | str],
                         OutputFormat: object,
                         prompt_id: str,
@@ -859,7 +691,7 @@ class AsyncIntel():
                         ):
 
         async with create_async_session(self.engine) as session:
-            prompt_result = await self.
+            prompt_result = await self.get_prompt_safe(prompt_id=prompt_id,
                                                        session=session)
             if prompt_result is None:
                raise IntellectRemoveError("nonexistent prompt_id")
@@ -868,7 +700,7 @@ class AsyncIntel():
         tasks = []
         for input_data in input_datas:
             tasks.append(
-                self.
+                self.intellect_format(
                     input_data = input_data,
                     prompt_id = prompt_id,
                     OutputFormat = OutputFormat,
@@ -880,7 +712,7 @@ class AsyncIntel():
         results = await asyncio.gather(*tasks, return_exceptions=False)
         return results
 
-    def
+    def intellect_warp(self,prompt_id: str):
         def outer_packing(func):
             @functools.wraps(func)
             async def wrapper(*args, **kwargs):
@@ -895,7 +727,7 @@ class AsyncIntel():
                 elif isinstance(input_data,str):
                     input_ = output_ = input_data
 
-                output_ = await self.
+                output_ = await self.intellect_format(
                     input_data = input_data,
                     prompt_id = prompt_id,
                     OutputFormat = OutputFormat,
@@ -908,17 +740,22 @@ class AsyncIntel():
             return wrapper
         return outer_packing
 
-    async def
+    async def intellect_format_eval(self,
                         OutputFormat: object,
                         prompt_id: str,
                         ExtraFormats: list[object] = [],
                         version: str = None,
-                        MIN_SUCCESS_RATE = 80.0
+                        MIN_SUCCESS_RATE = 80.0,
+                        ConTent_Function = None,
                         ):
-
+        """
+        ConTent_Function:
+            # TODO human-judged eval
+            # TODO LLM-judged eval
+        """
         async with create_async_session(self.engine) as session:
-            use_cases = await self.
-            prompt_result = await self.
+            use_cases = await self.get_use_case(target_prompt_id=prompt_id,session=session)
+            prompt_result = await self.get_prompt_safe(prompt_id=prompt_id,
                                                        session=session)
             if prompt_result is None:
                raise IntellectRemoveError("nonexistent prompt_id")
@@ -932,7 +769,7 @@ class AsyncIntel():
         async def evals_func(use_case,prompt_id,OutputFormat,ExtraFormats,version):
             try:
                 # pass the arguments through here
-                await self.
+                ai_result = await self.intellect_format(
                     input_data = use_case.use_case,
                     prompt_id = prompt_id,
                     OutputFormat = OutputFormat,
@@ -940,18 +777,17 @@ class AsyncIntel():
                     version = version,
                     inference_save_case = False,
                 )
-
-
-                # TODO LLM-judged eval
+                if ConTent_Function:
+                    ConTent_Function()
                 result_cases.append({"type":"Successful","case":use_case.use_case,"reply":f"pass"})
-                use_case.output =
+                use_case.output = ai_result
             except IntellectRemoveFormatError as e:
                 result_cases.append({"type":"FAILED","case":use_case.use_case,"reply":f"{e}"})
                 use_case.output = f"{"FAILED"}-{e}"
             except Exception as e: # catch other possible errors
                 result_cases.append({"type":"FAILED","case":use_case.use_case,"reply":f"Exp {e}"})
                 use_case.output = f"{"FAILED"}-{e}"
-
+            await session.commit()
 
         tasks = []
         for use_case in use_cases:
@@ -964,7 +800,8 @@ class AsyncIntel():
                     version = version
                 )
             )
-        await
+        await tqdm.gather(*tasks,total=len(tasks))
+        # await asyncio.gather(*tasks, return_exceptions=False)
 
 
         successful_assertions = 0
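Swapping `asyncio.gather` for `tqdm.gather` keeps the same awaiting semantics but renders a progress bar as eval tasks complete. A runnable sketch of the pattern (the `work` coroutine is illustrative):

    import asyncio
    from tqdm.asyncio import tqdm  # the asyncio-aware tqdm class

    async def work(i: int) -> int:
        await asyncio.sleep(0.01)
        return i * i

    async def main() -> list[int]:
        tasks = [work(i) for i in range(100)]
        # Drop-in replacement for asyncio.gather, plus a progress bar.
        return await tqdm.gather(*tasks, total=len(tasks))

    results = asyncio.run(main())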
@@ -978,8 +815,106 @@ class AsyncIntel():
         success_rate = (successful_assertions / total_assertions) * 100
 
         if success_rate >= MIN_SUCCESS_RATE:
-            return "pass", success_rate, total_assertions, json.dumps(bad_case,ensure_ascii=False),
+            return "pass", success_rate, str(total_assertions), json.dumps(bad_case,ensure_ascii=False),
         else:
-            return "fail",success_rate, total_assertions, json.dumps(bad_case,ensure_ascii=False),
+            return "fail",success_rate, str(total_assertions), json.dumps(bad_case,ensure_ascii=False),
+
+
+
+    def draw_data(self):
+        df = self.eval_df
+        # --- visualization ---
+        fig = go.Figure()
+
+        # choose a color for each bar dynamically
+        colors = []
+        for status_val in df['status']:
+            if status_val == 'pass':
+                colors.append('mediumseagreen') # green for pass
+            else: # treat every other status as "fail"
+                colors.append('lightcoral') # red for fail
+
+        fig.add_trace(go.Bar(
+            y=df['name'], # project names on the Y axis
+            x=df['score'], # pass percentage on the X axis (the score column now holds the pass percentage)
+            orientation='h', # horizontal bars
+            name='pass rate', # this name may show up in the legend
+            marker_color=colors, # !!! colors are driven by status here !!!
+            text=df['score'].apply(lambda x: f'{x:.2f}%'), # print the percentage on each bar
+            textposition='inside',
+            insidetextanchor='middle',
+            hovertemplate="<b>prompt:</b> %{y}<br><b>status:</b> " + df['status'] + "<br><b>total:</b> "+ df['total'] + "<br><b>pass percentage:</b> %{x:.2f}%<extra></extra>"
+        ))
+
+        # add a helper bar as a background representing the full 100%
+        fig.add_trace(go.Bar(
+            y=df['name'],
+            x=[100] * len(df), # every project fills up to 100%
+            orientation='h',
+            name='total',
+            marker_color='lightgray', # gray background
+            hoverinfo='none', # no hover info
+            opacity=0.5, # transparency
+            showlegend=False # keep it out of the legend
+        ))
+
+        fig.update_layout(
+            title='Pass percentage and status per project/batch',
+            xaxis=dict(
+                title='pass percentage (%)',
+                range=[0, 100], # X axis spans 0-100
+                tickvals=[0, 25, 50, 75, 100],
+                showgrid=True,
+                gridcolor='lightgray'
+            ),
+            yaxis=dict(
+                title='project/batch',
+                autorange="reversed"
+            ),
+            barmode='overlay', # still overlay mode, since the background bar is independent
+            hovermode="y unified",
+            margin=dict(l=100, r=20, t=60, b=50),
+            height=400 + len(df) * 30
+        )
+        error_message =str(df['bad_case'].to_dict())
+        fig.add_annotation(
+            text=f"<b>bad_case:</b> {error_message}", # the text to display
+            xref="paper", yref="paper", # "paper" coordinates, relative to the chart area
+            x=0.01, y=-0.15, # x=0.01 hugs the left edge, y=-0.15 sits below the chart bottom (tune as needed)
+            showarrow=False, # no arrow
+            align="left",
+            font=dict(
+                family="Arial, sans-serif",
+                size=12,
+                color="red" # errors are conventionally red
+            ),
+            bgcolor="white", # background color
+            bordercolor="red", # border color
+            borderwidth=1,
+            borderpad=4,
+            xanchor='left', # anchor the text box's left edge at x
+            yanchor='top' # anchor the text box's top edge at y
+        )
+        # the bottom margin may also need adjusting to fit the error message
+        fig.update_layout(
+            margin=dict(l=100, r=20, t=60, b=100), # enlarge the bottom margin
+            height=400 + len(df) * 30 + 50 # enlarge the chart to fit the text box
+        )
+
+        fig.show()
+        pass
+
+    async def _evals(self,prompt_id, OutputFormat, ExtraFormats_list = [],**kwargs):
+
+        status,score, total, bad_case = await self.intellect_format_eval(
+            prompt_id=prompt_id,
+            OutputFormat = OutputFormat,
+            ExtraFormats = ExtraFormats_list,
+            version = None,
+            **kwargs
+        )
+        self.df.loc[len(self.df)] = {"name":prompt_id,
+                                     'status':status,"score":score,
+                                     "total":total,"bad_case":bad_case}
 
 # run the full test; if it fails, have the LLM adjust and retest; if it still fails, have the LLM fork variants; if it still fails, swap in human effort
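Eval outcomes accumulate in a pandas DataFrame and are rendered with the new Plotly bar chart (note that the diff's `_evals` writes to `self.df`, while `__init__` creates the frame as `self.eval_df`). A minimal sketch of the row-append idiom the diff relies on, with illustrative values:

    import pandas as pd

    eval_df = pd.DataFrame({"name": [], "status": [], "score": [], "total": [], "bad_case": []})

    # df.loc[len(df)] appends one labelled row in place.
    eval_df.loc[len(eval_df)] = {"name": "prompt-1", "status": "pass",
                                 "score": 92.5, "total": "40", "bad_case": "[]"}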
pro_craft/prompt_craft/sync.py
CHANGED
@@ -86,11 +86,10 @@ class Intel():
 
         if model_name in ["gemini-2.5-flash-preview-05-20-nothinking",]:
             self.llm = BianXieAdapter(model_name = model_name)
-        elif model_name in ["doubao-1-5-pro-256k-250115",]:
+        elif model_name in ["doubao-1-5-pro-256k-250115","doubao-1-5-pro-32k-250115"]:
             self.llm = ArkAdapter(model_name = model_name)
         else:
-
-            self.llm = BianXieAdapter()
+            raise Exception("error llm name")
         self.logger = logger or pro_craft_logger
 
     def _get_latest_prompt_version(self,target_prompt_id,session):
{pro_craft-0.1.29.dist-info → pro_craft-0.1.36.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pro-craft
-Version: 0.1.29
+Version: 0.1.36
 Summary: Add your description here
 Requires-Python: >=3.12
 Description-Content-Type: text/markdown
@@ -9,6 +9,7 @@ Requires-Dist: anyio>=4.11.0
 Requires-Dist: db-help>=0.2.2
 Requires-Dist: fastapi>=0.119.0
 Requires-Dist: llmada>=1.1.11
+Requires-Dist: plotly>=6.3.1
 Requires-Dist: pyyaml>=6.0.3
 Requires-Dist: toml>=0.10.2
 Requires-Dist: utils-tool==0.1.3
{pro_craft-0.1.29.dist-info → pro_craft-0.1.36.dist-info}/RECORD
CHANGED
@@ -1,19 +1,19 @@
-pro_craft/__init__.py,sha256=
-pro_craft/database.py,sha256=
+pro_craft/__init__.py,sha256=CkERhdDQyrPWpOb5nArMt2UCcX8STJLF7JC2x7yTBYM,515
+pro_craft/database.py,sha256=cbUcy4_e9RgcDVBNMvERNmTv_pZWgd4ox4a4drBWS6w,9310
 pro_craft/file_manager.py,sha256=abVAlJ07_egWNuTj4JiP4me8NloQrsXGNd-SP63ab94,3738
-pro_craft/log.py,sha256=
+pro_craft/log.py,sha256=EXrNt2kYYPdO3RSr79lpfr5nh0sxRSiYGmswl8-PAzo,3262
 pro_craft/utils.py,sha256=R1DFkS4dsm5dIhg8lLTgBBvItvIYyyojROdh-ykqiYk,5250
 pro_craft/code_helper/coder.py,sha256=L6pRQr0pYRIHrMFZ4-pO_tZf1koxgGgF3L7Vl-GIyjM,24687
 pro_craft/code_helper/designer.py,sha256=3gyCqrjcw61sHzDjUPKhL1LOAE8xWLLbNT8NlK2mFLc,4739
 pro_craft/prompt_craft/__init__.py,sha256=83ruWO1Oci-DWvdVhPqcQrgdZTNfbmK72VQCkWASk7A,80
-pro_craft/prompt_craft/async_.py,sha256=
+pro_craft/prompt_craft/async_.py,sha256=6IqV_2KYSFjS4rzHzoTbz0Sy6g9toJS5HHLs1EW2_E0,40469
 pro_craft/prompt_craft/new.py,sha256=ULjGGl95vmHrOs7XECJGlaqj1NE9BypE5WnFYhGugRY,25903
-pro_craft/prompt_craft/sync.py,sha256=
+pro_craft/prompt_craft/sync.py,sha256=4bms8Qvzq5QqgwHWwiyjrcl7hdkSqE7Kne5s3Ex8bBU,26217
 pro_craft/server/mcp/__init__.py,sha256=4dbl-lFcm0r2tkOP04OxqiZG2jR-rqF181qi2AfU6UA,123
 pro_craft/server/mcp/prompt.py,sha256=OZrsyUfSQMOY_KX7dWthW209adz5JfELsQ0ODfuQR44,1245
 pro_craft/server/router/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pro_craft/server/router/prompt.py,sha256=Wa4FfYRL6oeyA3F-79pmPeIH0Vo8wSEv7RH1lP6jXck,2907
-pro_craft-0.1.
-pro_craft-0.1.
-pro_craft-0.1.
-pro_craft-0.1.
+pro_craft-0.1.36.dist-info/METADATA,sha256=yjrUmXcrIENwRaz8POKuLPWkmCxi9xYKoxOzqWGjIoc,1718
+pro_craft-0.1.36.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+pro_craft-0.1.36.dist-info/top_level.txt,sha256=yqYDHArnYMWpeCxkmGRwlL6sJtxiOUnYylLDx9EOgFg,10
+pro_craft-0.1.36.dist-info/RECORD,,
{pro_craft-0.1.29.dist-info → pro_craft-0.1.36.dist-info}/WHEEL
File without changes
{pro_craft-0.1.29.dist-info → pro_craft-0.1.36.dist-info}/top_level.txt
File without changes