pro-craft 0.1.37__tar.gz → 0.1.39__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pro-craft might be problematic.
- {pro_craft-0.1.37 → pro_craft-0.1.39}/PKG-INFO +2 -1
- {pro_craft-0.1.37 → pro_craft-0.1.39}/pyproject.toml +2 -2
- {pro_craft-0.1.37 → pro_craft-0.1.39}/src/pro_craft/database.py +70 -1
- {pro_craft-0.1.37 → pro_craft-0.1.39}/src/pro_craft/prompt_craft/async_.py +68 -28
- pro_craft-0.1.39/src/pro_craft/server/mcp/models.py +35 -0
- pro_craft-0.1.39/src/pro_craft/server/mcp/prompt.py +78 -0
- pro_craft-0.1.39/src/pro_craft/server/router/models.py +35 -0
- pro_craft-0.1.39/src/pro_craft/server/router/prompt.py +163 -0
- {pro_craft-0.1.37 → pro_craft-0.1.39}/src/pro_craft.egg-info/PKG-INFO +2 -1
- {pro_craft-0.1.37 → pro_craft-0.1.39}/src/pro_craft.egg-info/SOURCES.txt +2 -0
- {pro_craft-0.1.37 → pro_craft-0.1.39}/src/pro_craft.egg-info/requires.txt +1 -0
- pro_craft-0.1.37/src/pro_craft/server/mcp/prompt.py +0 -45
- pro_craft-0.1.37/src/pro_craft/server/router/prompt.py +0 -80
- {pro_craft-0.1.37 → pro_craft-0.1.39}/README.md +0 -0
- {pro_craft-0.1.37 → pro_craft-0.1.39}/setup.cfg +0 -0
- {pro_craft-0.1.37 → pro_craft-0.1.39}/src/pro_craft/__init__.py +0 -0
- {pro_craft-0.1.37 → pro_craft-0.1.39}/src/pro_craft/code_helper/coder.py +0 -0
- {pro_craft-0.1.37 → pro_craft-0.1.39}/src/pro_craft/code_helper/designer.py +0 -0
- {pro_craft-0.1.37 → pro_craft-0.1.39}/src/pro_craft/file_manager.py +0 -0
- {pro_craft-0.1.37 → pro_craft-0.1.39}/src/pro_craft/log.py +0 -0
- {pro_craft-0.1.37 → pro_craft-0.1.39}/src/pro_craft/prompt_craft/__init__.py +0 -0
- {pro_craft-0.1.37 → pro_craft-0.1.39}/src/pro_craft/prompt_craft/new.py +0 -0
- {pro_craft-0.1.37 → pro_craft-0.1.39}/src/pro_craft/prompt_craft/sync.py +0 -0
- {pro_craft-0.1.37 → pro_craft-0.1.39}/src/pro_craft/server/mcp/__init__.py +0 -0
- {pro_craft-0.1.37 → pro_craft-0.1.39}/src/pro_craft/server/router/__init__.py +0 -0
- {pro_craft-0.1.37 → pro_craft-0.1.39}/src/pro_craft/utils.py +0 -0
- {pro_craft-0.1.37 → pro_craft-0.1.39}/src/pro_craft.egg-info/dependency_links.txt +0 -0
- {pro_craft-0.1.37 → pro_craft-0.1.39}/src/pro_craft.egg-info/top_level.txt +0 -0
- {pro_craft-0.1.37 → pro_craft-0.1.39}/tests/test22.py +0 -0
- {pro_craft-0.1.37 → pro_craft-0.1.39}/tests/test_11.py +0 -0
- {pro_craft-0.1.37 → pro_craft-0.1.39}/tests/test_coder.py +0 -0
- {pro_craft-0.1.37 → pro_craft-0.1.39}/tests/test_designer.py +0 -0
{pro_craft-0.1.37 → pro_craft-0.1.39}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pro-craft
-Version: 0.1.37
+Version: 0.1.39
 Summary: Add your description here
 Requires-Python: >=3.12
 Description-Content-Type: text/markdown
@@ -9,6 +9,7 @@ Requires-Dist: anyio>=4.11.0
 Requires-Dist: db-help>=0.2.2
 Requires-Dist: fastapi>=0.119.0
 Requires-Dist: llmada>=1.1.11
+Requires-Dist: mcp[cli]>=1.19.0
 Requires-Dist: plotly>=6.3.1
 Requires-Dist: pyyaml>=6.0.3
 Requires-Dist: toml>=0.10.2
{pro_craft-0.1.37 → pro_craft-0.1.39}/pyproject.toml

@@ -1,10 +1,10 @@
 [project]
 name = "pro-craft"
-version = "0.1.37"
+version = "0.1.39"
 description = "Add your description here"
 readme = "README.md"
 requires-python = ">=3.12"
-dependencies = [ "aiomysql>=0.2.0", "anyio>=4.11.0", "db-help>=0.2.2", "fastapi>=0.119.0", "llmada>=1.1.11", "plotly>=6.3.1", "pyyaml>=6.0.3", "toml>=0.10.2", "utils-tool==0.1.3", "uvicorn>=0.38.0",]
+dependencies = [ "aiomysql>=0.2.0", "anyio>=4.11.0", "db-help>=0.2.2", "fastapi>=0.119.0", "llmada>=1.1.11", "mcp[cli]>=1.19.0", "plotly>=6.3.1", "pyyaml>=6.0.3", "toml>=0.10.2", "utils-tool==0.1.3", "uvicorn>=0.38.0",]
 
 [tool.setuptools.package-data]
 pro_craft = [ "config.yaml",]
{pro_craft-0.1.37 → pro_craft-0.1.39}/src/pro_craft/database.py

@@ -1,5 +1,5 @@
 
-from sqlalchemy import Column, Integer, String, Text, DateTime, text, UniqueConstraint, Boolean, func
+from sqlalchemy import Column, Integer, String, Text, DateTime, text, UniqueConstraint, Boolean, func, Float
 from sqlalchemy.orm import declarative_base
 
 from datetime import datetime, timedelta
@@ -248,3 +248,72 @@ class UseCase(PromptBase):
                 f"is_deleted='{self.is_deleted}...'>")
 
 
+class EvalsInfo(PromptBase):
+    __tablename__ = 'ai_evals'  # table name in the database; rename it if you prefer
+
+    # __table_args__ = (
+    #     UniqueConstraint('prompt_id', 'use_case', name='_prompt_id_version_uc'),
+    #     # the 'name' argument is optional; it names the constraint in the database for easier management and debugging
+    # )
+
+    # id (int, primary_key=True, autoincrement=True)
+    # in the attribute table, id is int, not null, primary key, length 0, key order ASC, auto increment
+    id = Column(
+        Integer,
+        primary_key=True,
+        autoincrement=True,  # auto increment
+        nullable=False,      # not null
+        comment="Primary key ID"
+    )
+
+    # prompt_id (varchar 255, not null, unique)
+    # in the attribute table, prompt_id is varchar, length 255, not null
+    prompt_id = Column(
+        String(255),     # VARCHAR, length 255
+        nullable=False,  # not null; must be unique, which creates a unique index
+        comment="Unique identifier for the prompt"
+    )
+    status = Column(
+        String(255),     # VARCHAR, length 255
+        nullable=False,  # not null
+        comment="通过/未通过"
+    )
+
+    score = Column(
+        Float,
+        nullable=False,  # not null
+        comment="失败次数"
+    )
+
+    total = Column(
+        Integer,
+        nullable=False,  # not null
+        comment="失败次数"
+    )
+
+    # prompt (text, not null)
+    # in the attribute table, prompt is text, not null
+    bad_case = Column(
+        Text,            # TEXT, suitable for long content
+        nullable=False,  # not null
+        comment="用例"
+    )
+
+    timestamp = Column(
+        DateTime,
+        nullable=False,  # not null
+        server_default=text('CURRENT_TIMESTAMP'),
+        onupdate=text('CURRENT_TIMESTAMP'),
+        comment="时间戳"
+    )
+
+
+    is_deleted = Column(Boolean, default=False, server_default=text('0'))
+
+    # define __repr__ so that printing the object gives a clear representation
+    def __repr__(self):
+        return (f"<Prompt(id={self.id}, prompt_id='{self.prompt_id}', "
+                f"status='{self.status}...', score='{self.score}')>"
+                f"total='{self.total}...', score='{self.score}')>"
+                f"is_deleted='{self.is_deleted}...'>")
+
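For context, the new EvalsInfo model persists one row per evaluation run (pass/fail status, score, number of cases, and the serialized bad cases). Below is a minimal sketch of creating the ai_evals table with SQLAlchemy's async engine; the connection URL and the pro_craft.database import path are assumptions for illustration, not taken from the package.

```python
# Hypothetical sketch: create the new ai_evals table defined by EvalsInfo.
# DATABASE_URL and the import path are assumptions, not from the package.
import asyncio
from sqlalchemy.ext.asyncio import create_async_engine
from pro_craft.database import PromptBase  # assumed path; importing the module registers EvalsInfo on PromptBase.metadata

DATABASE_URL = "mysql+aiomysql://user:password@localhost/prompts"  # placeholder

async def create_tables() -> None:
    engine = create_async_engine(DATABASE_URL, echo=False)
    async with engine.begin() as conn:
        # creates ai_evals (and the other PromptBase tables) if they do not exist
        await conn.run_sync(PromptBase.metadata.create_all)
    await engine.dispose()

if __name__ == "__main__":
    asyncio.run(create_tables())
```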
{pro_craft-0.1.37 → pro_craft-0.1.39}/src/pro_craft/prompt_craft/async_.py

@@ -25,14 +25,14 @@ from datetime import datetime, timedelta
 from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine, async_sessionmaker
 from sqlalchemy import select, and_  # import select and and_
 from sqlalchemy.orm import class_mapper  # used to check whether an object is persistent
-
+
 from tqdm.asyncio import tqdm
 import pandas as pd
 import plotly.graph_objects as go
 from pro_craft import super_log
 
 BATCH_SIZE = int(os.getenv("DATABASE_SYNC_BATCH_SIZE",100))
-
+print(12)
 
 def fix_broken_json_string(broken_json_str):
     # remove the BOM
@@ -421,6 +421,10 @@ class AsyncIntel():
                     inference_save_case = True,
                     change_case = False,
                     ):
+        """
+        Custom method for pushing automated action orders.
+        It does not perform strict validation; its main job is automatically revising prompts, or managing them.
+        """
         if isinstance(input_data,dict):
             input_ = json.dumps(input_data,ensure_ascii=False)
         elif isinstance(input_data,str):
@@ -634,7 +638,16 @@ class AsyncIntel():
                     ExtraFormats: list[object] = [],
                     version: str = None,
                     inference_save_case = True,
+                    ConTent_Function = None,
+                    AConTent_Function = None,
                     ):
+        """
+        This format method is the strict-validation mode, an enhanced version of intellect: it actively validates the content and raises an exception promptly (or fixes it when it gets the chance).
+        ConTent_Function
+        AConTent_Function
+        Two ways to pass a validator in; if the content does not pass, an exception is raised.
+
+        """
 
         base_format_prompt = """
 按照一定格式输出, 以便可以通过如下校验
@@ -642,7 +655,7 @@ class AsyncIntel():
 使用以下正则检出
 "```json([\s\S]*?)```"
 使用以下方式验证
-        """
+        """
         if OutputFormat:
             output_format = base_format_prompt + "\n".join([inspect.getsource(outputformat) for outputformat in ExtraFormats]) + inspect.getsource(OutputFormat)
         else:
@@ -676,6 +689,12 @@ class AsyncIntel():
 
             except Exception as e:
                 raise Exception(f"Error {prompt_id} : {e}") from e
+
+        if ConTent_Function:
+            ConTent_Function(ai_result)
+
+        if AConTent_Function:
+            await AConTent_Function(ai_result)
 
         return ai_result
 
@@ -686,6 +705,7 @@ class AsyncIntel():
                     ExtraFormats: list[object] = [],
                     version: str = None,
                     inference_save_case = True,
+                    **kwargs,
                     ):
 
         async with create_async_session(self.engine) as session:
@@ -705,6 +725,7 @@ class AsyncIntel():
                     ExtraFormats = ExtraFormats,
                     version = version,
                     inference_save_case = inference_save_case,
+                    **kwargs,
                 )
             )
         results = await asyncio.gather(*tasks, return_exceptions=False)
@@ -717,8 +738,7 @@ class AsyncIntel():
         # modification logic
         assert kwargs.get('input_data')  # an input_data argument is required
         input_data = kwargs.get('input_data')
-
-
+        kwargs.pop(input_data)
         if isinstance(input_data,dict):
             input_ = output_ = json.dumps(input_data,ensure_ascii=False)
         elif isinstance(input_data,str):
@@ -727,7 +747,8 @@ class AsyncIntel():
             output_ = await self.intellect_format(
                 input_data = input_data,
                 prompt_id = prompt_id,
-
+                **kwargs,
+
             )
 
         #######
@@ -740,10 +761,12 @@ class AsyncIntel():
     async def intellect_format_eval(self,
                     OutputFormat: object,
                     prompt_id: str,
+                    database_url = None,
                     ExtraFormats: list[object] = [],
                     version: str = None,
                     MIN_SUCCESS_RATE = 80.0,
                     ConTent_Function = None,
+                    AConTent_Function = None,
                     ):
         """
         ConTent_Function:
@@ -751,20 +774,32 @@ class AsyncIntel():
         # TODO LLM-based evaluation (eval)
         """
         async with create_async_session(self.engine) as session:
-            use_cases = await self.get_use_case(target_prompt_id=prompt_id,session=session)
             prompt_result = await self.get_prompt_safe(prompt_id=prompt_id,
                                                        session=session)
             if prompt_result is None:
                 raise IntellectRemoveError("不存在的prompt_id")
             if prompt_result.action_type != "inference":
                 raise IntellectRemoveError("请在inference模式下使用次类")
-
+
+        if database_url:
+            eval_engine = create_async_engine(database_url, echo=False,
+                                pool_size=10,        # number of connections kept in the pool
+                                max_overflow=20,     # extra connections allowed temporarily when pool_size is exhausted
+                                pool_recycle=3600,   # recycle connections every hour
+                                pool_pre_ping=True,  # check connection liveness before use
+                                pool_timeout=30      # maximum seconds to wait for a pooled connection
+                                )
+        else:
+            eval_engine = self.engine
+        async with create_async_session(eval_engine) as eval_session:
+            use_cases = await self.get_use_case(target_prompt_id=prompt_id,session=eval_session)
 
             total_assertions = len(use_cases)
             result_cases = []
 
             async def evals_func(use_case,prompt_id,OutputFormat,ExtraFormats,version):
                 try:
+
                     # the parameters are passed in here
                     ai_result = await self.intellect_format(
                         input_data = use_case.use_case,
@@ -773,18 +808,24 @@ class AsyncIntel():
                         ExtraFormats = ExtraFormats,
                         version = version,
                         inference_save_case = False,
+                        ConTent_Function = ConTent_Function,
+                        AConTent_Function = AConTent_Function,
                     )
-
-                    ConTent_Function()
+
                     result_cases.append({"type":"Successful","case":use_case.use_case,"reply":f"pass"})
-                    use_case.output = ai_result
+                    use_case.output = json.dumps(ai_result,ensure_ascii=False,indent=4)
+
+
                 except IntellectRemoveFormatError as e:
                     result_cases.append({"type":"FAILED","case":use_case.use_case,"reply":f"{e}"})
                     use_case.output = f"{"FAILED"}-{e}"
+                    use_case.faired_time +=1
+
                 except Exception as e:  # catch other possible errors
                     result_cases.append({"type":"FAILED","case":use_case.use_case,"reply":f"Exp {e}"})
                     use_case.output = f"{"FAILED"}-{e}"
-
+                    use_case.faired_time +=1
+
 
             tasks = []
             for use_case in use_cases:
@@ -800,6 +841,7 @@ class AsyncIntel():
             await tqdm.gather(*tasks,total=len(tasks))
             # await asyncio.gather(*tasks, return_exceptions=False)
 
+            await eval_session.commit()
 
             successful_assertions = 0
             bad_case = []
@@ -811,14 +853,23 @@ class AsyncIntel():
 
             success_rate = (successful_assertions / total_assertions) * 100
 
+
             if success_rate >= MIN_SUCCESS_RATE:
+                self.eval_df.loc[len(self.eval_df)] = {"name":prompt_id,
+                                                       'status':"通过",
+                                                       "score":success_rate,
+                                                       "total":str(total_assertions),
+                                                       "bad_case":json.dumps(bad_case,ensure_ascii=False)}
                 return "通过", success_rate, str(total_assertions), json.dumps(bad_case,ensure_ascii=False),
             else:
+                self.eval_df.loc[len(self.eval_df)] = {"name":prompt_id,
+                                                       'status':"未通过",
+                                                       "score":success_rate,
+                                                       "total":str(total_assertions),
+                                                       "bad_case":json.dumps(bad_case,ensure_ascii=False)}
                 return "未通过",success_rate, str(total_assertions), json.dumps(bad_case,ensure_ascii=False),
 
-
-
-    def draw_data(self):
+    def draw_data(self,save_html_path):
         df = self.eval_df
         # --- visualization section ---
         fig = go.Figure()
@@ -899,19 +950,8 @@ class AsyncIntel():
         )
 
         fig.show()
-
+        if save_html_path:
+            fig.write_html(save_html_path)
 
-    async def _evals(self,prompt_id, OutputFormat, ExtraFormats_list = [],**kwargs):
-
-        status,score, total, bad_case = await self.intellect_format_eval(
-            prompt_id=prompt_id,
-            OutputFormat = OutputFormat,
-            ExtraFormats = ExtraFormats_list,
-            version = None,
-            **kwargs
-        )
-        self.df.loc[len(self.df)] = {"name":prompt_id,
-                                     'status':status,"score":score,
-                                     "total":total,"bad_case":bad_case}
 
         # overall test; if it fails, the LLM adjusts and retests; still failing, the LLM fans out variants; still failing, hand it back to a human
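The ConTent_Function / AConTent_Function hooks added to intellect_format are called with the parsed result (ConTent_Function(ai_result), or await AConTent_Function(ai_result)); a validator that raises makes the call fail, which intellect_format_eval then counts as a failed case. A hedged sketch of passing such a validator follows; the MemoryCard class, the prompt_id, and the exact call site are illustrative assumptions, not part of the package.

```python
# Hedged usage sketch for the new validation hooks; names below are assumptions.
from pro_craft import AsyncIntel

class MemoryCard:
    # hypothetical output schema; intellect_format injects its source into the format prompt
    title: str
    content: str

def check_content(ai_result):
    # synchronous validator: raise to mark the output as failed
    if "title" not in ai_result:
        raise ValueError("model output is missing 'title'")

async def demo(intel: AsyncIntel):
    # intel is an already-configured AsyncIntel instance (constructor args omitted here)
    return await intel.intellect_format(
        input_data={"text": "..."},
        prompt_id="memorycard-format",   # one of the registered prompt ids
        OutputFormat=MemoryCard,
        ConTent_Function=check_content,  # called as ConTent_Function(ai_result)
    )
```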
pro_craft-0.1.39/src/pro_craft/server/mcp/models.py (new file)

@@ -0,0 +1,35 @@
+from typing import Dict, Any, Optional, List
+from pydantic import BaseModel, Field, model_validator, field_validator, RootModel
+import re
+
+class PushOrderRequest(BaseModel):
+    demand: str = Field(None, description="信息")
+    prompt_id: str = Field(..., description="提示词id")
+    action_type: str = Field(..., description="执行动作",min_length=1, max_length=10)
+
+    @field_validator('action_type')
+    @classmethod
+    def validate_action_type(cls, v: str) -> str:
+        if v in ['train','inference','summary','finetune','patch']:
+            return v
+        else:
+            raise ValueError(f"无效action_type: {v}")
+
+class GetPromptRequest(BaseModel):
+    prompt_id: str = Field(..., description="提示词id")
+
+class UpdatePromptRequest(BaseModel):
+    prompt_id: str = Field(..., description="提示词id")
+    prompt: str = Field(..., description="新的提示词")
+
+class RollBackPromptRequest(BaseModel):
+    prompt_id: str = Field(..., description="提示词id")
+    version: str = Field(..., description="版本号")
+
+class SyncDataBaseRequest(BaseModel):
+    slave_database_url: str = Field(None, description="从属数据库url")
+
+
+class PromptResponse(BaseModel):
+    msg: str = Field(..., description="信息")
+    content: str = None
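These request models are plain Pydantic schemas; PushOrderRequest restricts action_type to the five allowed values via its field_validator. A quick illustrative check (assuming the pro_craft.server.mcp.models import path, which is inferred from the file location in this diff):

```python
# Illustrative only; the import path is assumed from the file location shown above.
from pydantic import ValidationError
from pro_craft.server.mcp.models import PushOrderRequest

ok = PushOrderRequest(demand="tighten the tone", prompt_id="memorycard-polish", action_type="train")
print(ok.action_type)  # "train"

try:
    PushOrderRequest(demand="tighten the tone", prompt_id="memorycard-polish", action_type="deploy")
except ValidationError as e:
    print(e)  # includes the "无效action_type: deploy" message raised by validate_action_type
```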
pro_craft-0.1.39/src/pro_craft/server/mcp/prompt.py (new file)

@@ -0,0 +1,78 @@
+from mcp.server.fastmcp import FastMCP
+
+from pro_craft import Intel,AsyncIntel
+from .models import *
+
+def create_mcp(database_url: str,
+               slave_database_url: str,
+               model_name: str,
+               logger = None):
+    # region MCP Weather
+    mcp = FastMCP("Prompt")
+
+    intels = AsyncIntel(
+        database_url=database_url,
+        model_name=model_name,
+        logger=logger
+    )
+
+    @mcp.tool()
+    async def push_order(demand: str, prompt_id: str, action_type: str):
+        """
+        Which kind of adjustment you want the LLM to perform.
+        demand: str = Field(None, description="信息")
+        prompt_id: str = Field(..., description="提示词id")
+        action_type: str = Field(..., description="执行动作",min_length=1, max_length=10)
+        """
+        try:
+            PushOrderRequest(demand=demand,prompt_id=prompt_id,action_type=action_type)
+            result = await intels.push_action_order(
+                demand=demand,
+                prompt_id=prompt_id,
+                action_type=action_type
+            )
+            return PromptResponse(msg = "success",content=result)
+        except Exception as e:
+            return f"Error : {e}"
+
+    @mcp.tool()
+    async def get_registered_prompt():
+        "Get the ids of registered prompts that can be modified."
+        try:
+            result = ["memorycard-format",
+                      "memorycard-polish",
+                      "memorycard-merge",
+                      "memorycard-score",
+                      "memorycard-generate-content",
+                      "user-overview",
+                      "user-relationship-extraction",
+                      "avatar-brief",
+                      "avatar-personality-extraction",
+                      "avatar-desensitization",
+                      "biograph-free-writer",
+                      "biograph-paid-title",
+                      "biograph-outline",
+                      "biograph-brief",
+                      "biograph-extract-person-name",
+                      "biograph-extract-place",
+                      "biograph-extract-material",
+                      "biograph-writer"]
+
+            return PromptResponse(msg = "success",content=result)
+        except Exception as e:
+            return f"Error : {e}"
+
+
+    @mcp.tool()
+    async def sync_database():
+        try:
+            result = await intels.sync_production_database(slave_database_url)
+            return PromptResponse(msg = "success",content=result)
+        except Exception as e:
+            return f"Error : {e}"
+
+    return mcp
+
+if __name__ == "__main__":
+    mcp = create_mcp()
+    mcp.run(transport="streamable-http")
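The rewritten MCP module now builds the server around an AsyncIntel instance and exposes push_order, get_registered_prompt, and sync_database as async tools. A minimal launch sketch follows; the import path, URLs, and model name are placeholders (note that create_mcp requires these arguments, so the bare create_mcp() call in the module's own __main__ block would not run as written).

```python
# Assumed usage; the import path, connection strings, and model name are placeholders.
from pro_craft.server.mcp.prompt import create_mcp  # path assumed from the file location

mcp = create_mcp(
    database_url="mysql+aiomysql://user:password@localhost/prompts",
    slave_database_url="mysql+aiomysql://user:password@replica/prompts",
    model_name="gpt-4o-mini",
)

if __name__ == "__main__":
    # same transport as the module's own __main__ block
    mcp.run(transport="streamable-http")
```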
pro_craft-0.1.39/src/pro_craft/server/router/models.py (new file)

@@ -0,0 +1,35 @@
+from typing import Dict, Any, Optional, List
+from pydantic import BaseModel, Field, model_validator, field_validator, RootModel
+import re
+
+class PushOrderRequest(BaseModel):
+    demand: str = Field(None, description="信息")
+    prompt_id: str = Field(..., description="提示词id")
+    action_type: str = Field(..., description="执行动作",min_length=1, max_length=10)
+
+    @field_validator('action_type')
+    @classmethod
+    def validate_action_type(cls, v: str) -> str:
+        if v in ['train','inference','summary','finetune','patch']:
+            return v
+        else:
+            raise ValueError(f"无效action_type: {v}")
+
+class GetPromptRequest(BaseModel):
+    prompt_id: str = Field(..., description="提示词id")
+
+class UpdatePromptRequest(BaseModel):
+    prompt_id: str = Field(..., description="提示词id")
+    prompt: str = Field(..., description="新的提示词")
+
+class RollBackPromptRequest(BaseModel):
+    prompt_id: str = Field(..., description="提示词id")
+    version: str = Field(..., description="版本号")
+
+class SyncDataBaseRequest(BaseModel):
+    slave_database_url: str = Field(None, description="从属数据库url")
+
+
+class PromptResponse(BaseModel):
+    msg: str = Field(..., description="信息")
+    content: str = None
pro_craft-0.1.39/src/pro_craft/server/router/prompt.py (new file)

@@ -0,0 +1,163 @@
+
+
+from fastapi import APIRouter
+from pro_craft import Intel,AsyncIntel
+from pro_craft.utils import create_async_session
+from fastapi import FastAPI, HTTPException
+from .models import *
+
+def create_router(database_url: str,
+                  slave_database_url: str,
+                  model_name: str,
+                  logger = None):
+    """
+    # TODO reorganize into fully async code
+    Create a FastAPI APIRouter instance containing the ProCraft routes.
+
+    Args:
+        database_url (str): database connection string.
+        model_name (str): model name used for the Intel instance.
+        api_key_secret (str, optional): secret string used to validate the API key.
+            If provided, it overrides the PRO_CRAFT_API_KEY environment variable.
+            If neither is provided, the hard-coded 'your_default_secret_key' is used.
+    Returns:
+        APIRouter: the configured FastAPI APIRouter instance.
+    """
+
+    intels = AsyncIntel(
+        database_url=database_url,
+        model_name=model_name,
+        logger=logger
+    )
+
+    router = APIRouter(
+        tags=["prompt"]  # Depends is used here to ensure every request is validated
+    )
+
+    # automated modification
+    @router.post("/push_order",
+                 description="可选 train,inference,summary,finetune,patch",
+                 response_model=PromptResponse,
+                 )
+    async def push_order(request: PushOrderRequest):
+        try:
+            result = await intels.push_action_order(
+                demand=request.demand,
+                prompt_id=request.prompt_id,
+                action_type=request.action_type
+            )
+            return PromptResponse(msg = "success",content=result)
+        except Exception as e:
+            raise HTTPException(
+                status_code=500, detail=f"{e}"
+            )
+
+    # manual intervention
+
+    @router.get("/registered_prompt",
+                description="获取以注册的提示词",
+                response_model=PromptResponse)
+    async def get_prompt():
+        try:
+            result = ["memorycard-format",
+                      "memorycard-polish",
+                      "memorycard-merge",
+                      "memorycard-score",
+                      "memorycard-generate-content",
+                      "user-overview",
+                      "user-relationship-extraction",
+                      "avatar-brief",
+                      "avatar-personality-extraction",
+                      "avatar-desensitization",
+                      "biograph-free-writer",
+                      "biograph-paid-title",
+                      "biograph-outline",
+                      "biograph-brief",
+                      "biograph-extract-person-name",
+                      "biograph-extract-place",
+                      "biograph-extract-material",
+                      "biograph-writer"]
+
+            return PromptResponse(msg = "success",content=result)
+        except Exception as e:
+            raise HTTPException(
+                status_code=500, detail=f"{e}"
+            )
+    @router.post("/get_prompt",
+                 description="获得现行提示词",
+                 response_model=PromptResponse)
+    async def get_prompt(request: GetPromptRequest):
+        try:
+            async with create_async_session(intels.engine) as session:
+                result = await intels.get_prompt_safe(
+                    prompt_id=request.prompt_id,
+                    session=session
+                )
+            return PromptResponse(msg = "success",content=result)
+
+        except Exception as e:
+            raise HTTPException(
+                status_code=500, detail=f"{e}"
+            )
+
+    @router.post("/update_prompt",
+                 description="更新现行提示词",
+                 response_model=PromptResponse)
+    async def update_prompt(request: UpdatePromptRequest):
+        try:
+            async with create_async_session(intels.engine) as session:
+                await intels.save_prompt(
+                    prompt_id = request.prompt_id,
+                    new_prompt = request.prompt,
+                    use_case = "",
+                    action_type = "inference",
+                    demand = "上传",
+                    score = 70,
+                    session = session)
+            return PromptResponse(msg = "success",content=None)
+        except Exception as e:
+            raise HTTPException(
+                status_code=500, detail=f"{e}"
+            )
+
+    @router.post("/rollback_prompt",
+                 description="回滚现行提示词",
+                 response_model=PromptResponse)
+    async def roll_back(request: RollBackPromptRequest):
+        try:
+            async with create_async_session(intels.engine) as session:
+                result = await intels.get_prompt_safe(
+                    prompt_id=request.prompt_id,
+                    version = request.version,
+                    session=session
+                )
+                assert result.version == request.version
+                await intels.save_prompt(
+                    prompt_id = request.prompt_id,
+                    new_prompt = result.prompt,
+                    use_case = result.use_case,
+                    action_type = "inference",
+                    demand = "",
+                    score = 61,
+                    session = session)
+            return PromptResponse(msg = "success",content=None)
+        except Exception as e:
+            raise HTTPException(
+                status_code=500, detail=f"{e}"
+            )
+
+
+    # system-level services
+
+    @router.post("/sync_database")
+    async def sync_database(request: SyncDataBaseRequest):
+        try:
+            slave_database_url = request.slave_database_url or slave_database_url
+            result = await intels.sync_production_database(slave_database_url)
+            return PromptResponse(msg = "success",content=result)
+        except Exception as e:
+            raise HTTPException(
+                status_code=500, detail=f"{e}"
+            )
+    return router
+
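The REST counterpart now uses POST endpoints with the Pydantic request models and a PromptResponse response model. A hedged sketch of mounting the router on a FastAPI application follows; the import path, prefix, URLs, and model name are assumptions.

```python
# Assumed wiring; adjust URLs, model name, and prefix to your deployment.
import uvicorn
from fastapi import FastAPI
from pro_craft.server.router.prompt import create_router  # path assumed from the file location

app = FastAPI()
app.include_router(
    create_router(
        database_url="mysql+aiomysql://user:password@localhost/prompts",
        slave_database_url="mysql+aiomysql://user:password@replica/prompts",
        model_name="gpt-4o-mini",
    ),
    prefix="/prompt",  # hypothetical prefix
)

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
```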
{pro_craft-0.1.37 → pro_craft-0.1.39}/src/pro_craft.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pro-craft
-Version: 0.1.37
+Version: 0.1.39
 Summary: Add your description here
 Requires-Python: >=3.12
 Description-Content-Type: text/markdown
@@ -9,6 +9,7 @@ Requires-Dist: anyio>=4.11.0
 Requires-Dist: db-help>=0.2.2
 Requires-Dist: fastapi>=0.119.0
 Requires-Dist: llmada>=1.1.11
+Requires-Dist: mcp[cli]>=1.19.0
 Requires-Dist: plotly>=6.3.1
 Requires-Dist: pyyaml>=6.0.3
 Requires-Dist: toml>=0.10.2
{pro_craft-0.1.37 → pro_craft-0.1.39}/src/pro_craft.egg-info/SOURCES.txt

@@ -17,8 +17,10 @@ src/pro_craft/prompt_craft/async_.py
 src/pro_craft/prompt_craft/new.py
 src/pro_craft/prompt_craft/sync.py
 src/pro_craft/server/mcp/__init__.py
+src/pro_craft/server/mcp/models.py
 src/pro_craft/server/mcp/prompt.py
 src/pro_craft/server/router/__init__.py
+src/pro_craft/server/router/models.py
 src/pro_craft/server/router/prompt.py
 tests/test22.py
 tests/test_11.py
pro_craft-0.1.37/src/pro_craft/server/mcp/prompt.py (removed)

@@ -1,45 +0,0 @@
-from mcp.server.fastmcp import FastMCP
-
-from pro_craft import Intel
-
-def create_mcp(database_url: str,
-               slave_database_url: str,
-               model_name: str):
-    # region MCP Weather
-    mcp = FastMCP("PromptManager")
-
-    intels = Intel(
-        database_url=database_url,
-        model_name=model_name
-    )
-
-    @mcp.tool()
-    def push_order(demand: str, prompt_id: str, action_type: str = "train") -> str:
-        result = intels.push_action_order(
-            demand=demand,
-            prompt_id=prompt_id,
-            action_type=action_type
-        )
-        return {"message": "success", "result": result}
-
-    @mcp.tool()
-    def get_latest_prompt(prompt_id: str) -> str:
-        with create_session(intels.engine) as session:
-            result = intels.get_prompts_from_sql(
-                prompt_id=prompt_id,
-                session=session
-            )
-        return {"message": "success", "result": result}
-
-
-    @mcp.tool()
-    def sync_database() -> str:
-        result = intels.sync_prompt_data_to_database(slave_database_url)
-        return {"message": "success","result":result}
-
-    return mcp
-
-
-if __name__ == "__main__":
-    mcp = create_mcp()
-    mcp.run(transport="streamable-http")
pro_craft-0.1.37/src/pro_craft/server/router/prompt.py (removed)

@@ -1,80 +0,0 @@
-
-
-from fastapi import APIRouter
-from pro_craft import Intel,AsyncIntel
-from pro_craft.utils import create_async_session
-
-def create_router(database_url: str,
-                  slave_database_url: str,
-                  model_name: str,
-                  logger = None):
-    """
-    # TODO reorganize into fully async code
-    Create a FastAPI APIRouter instance containing the ProCraft routes.
-
-    Args:
-        database_url (str): database connection string.
-        model_name (str): model name used for the Intel instance.
-        api_key_secret (str, optional): secret string used to validate the API key.
-            If provided, it overrides the PRO_CRAFT_API_KEY environment variable.
-            If neither is provided, the hard-coded 'your_default_secret_key' is used.
-    Returns:
-        APIRouter: the configured FastAPI APIRouter instance.
-    """
-
-    intels = AsyncIntel(
-        database_url=database_url,
-        model_name=model_name,
-        logger=logger
-    )
-
-    router = APIRouter(
-        tags=["prompt"]  # Depends is used here to ensure every request is validated
-    )
-
-    @router.get("/push_order",
-                description="可选 train,inference,summary,finetune,patch",)
-    async def push_order(demand: str, prompt_id: str, action_type: str = "train"):
-        result = await intels.push_action_order(
-            demand=demand,
-            prompt_id=prompt_id,
-            action_type=action_type
-        )
-        return {"message": "success", "result": result}
-
-    @router.get("/get_latest_prompt")
-    async def get_latest_prompt(prompt_id: str):
-        async with create_async_session(intels.engine) as session:
-            result = await intels.get_prompts_from_sql(
-                prompt_id=prompt_id,
-                session=session
-            )
-        return {"message": "success", "result": result}
-
-    @router.get("/sync_database")
-    async def sync_database():
-        result = await intels.sync_prompt_data_to_database(slave_database_url)
-        return {"message": "success","result":result}
-
-
-    @router.get("/roll_back")
-    async def roll_back(prompt_id:str,version:str):
-        async with create_async_session(intels.engine) as session:
-            result = await intels.get_prompts_from_sql(
-                prompt_id=prompt_id,
-                version = version,
-                session=session
-            )
-            assert result.version == version
-            await intels.save_prompt_increment_version(
-                prompt_id = prompt_id,
-                new_prompt = result.prompt,
-                use_case = result.use_case,
-                action_type = "inference",
-                demand = "",
-                score = 61,
-                session = session)
-        return {"message": "success"}
-
-    return router
-

All remaining files listed above (marked +0 -0) are unchanged between 0.1.37 and 0.1.39.