pro-craft 0.1.53__tar.gz → 0.1.55__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pro-craft has been flagged as possibly problematic.

Files changed (31)
  1. {pro_craft-0.1.53 → pro_craft-0.1.55}/PKG-INFO +2 -1
  2. {pro_craft-0.1.53 → pro_craft-0.1.55}/pyproject.toml +2 -2
  3. {pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft/prompt_craft/async_.py +128 -3
  4. {pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft.egg-info/PKG-INFO +2 -1
  5. {pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft.egg-info/SOURCES.txt +0 -1
  6. {pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft.egg-info/requires.txt +1 -0
  7. pro_craft-0.1.53/src/pro_craft/server/router/prompt copy.py +0 -183
  8. {pro_craft-0.1.53 → pro_craft-0.1.55}/README.md +0 -0
  9. {pro_craft-0.1.53 → pro_craft-0.1.55}/setup.cfg +0 -0
  10. {pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft/__init__.py +0 -0
  11. {pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft/code_helper/coder.py +0 -0
  12. {pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft/code_helper/designer.py +0 -0
  13. {pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft/database.py +0 -0
  14. {pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft/file_manager.py +0 -0
  15. {pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft/log.py +0 -0
  16. {pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft/prompt_craft/__init__.py +0 -0
  17. {pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft/prompt_craft/new.py +0 -0
  18. {pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft/prompt_craft/sync.py +0 -0
  19. {pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft/server/mcp/__init__.py +0 -0
  20. {pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft/server/mcp/models.py +0 -0
  21. {pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft/server/mcp/prompt.py +0 -0
  22. {pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft/server/router/__init__.py +0 -0
  23. {pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft/server/router/models.py +0 -0
  24. {pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft/server/router/prompt.py +0 -0
  25. {pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft/utils.py +0 -0
  26. {pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft.egg-info/dependency_links.txt +0 -0
  27. {pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft.egg-info/top_level.txt +0 -0
  28. {pro_craft-0.1.53 → pro_craft-0.1.55}/tests/test22.py +0 -0
  29. {pro_craft-0.1.53 → pro_craft-0.1.55}/tests/test_11.py +0 -0
  30. {pro_craft-0.1.53 → pro_craft-0.1.55}/tests/test_coder.py +0 -0
  31. {pro_craft-0.1.53 → pro_craft-0.1.55}/tests/test_designer.py +0 -0
{pro_craft-0.1.53 → pro_craft-0.1.55}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pro-craft
- Version: 0.1.53
+ Version: 0.1.55
  Summary: Add your description here
  Requires-Python: >=3.12
  Description-Content-Type: text/markdown
@@ -10,6 +10,7 @@ Requires-Dist: db-help>=0.2.2
  Requires-Dist: fastapi>=0.119.0
  Requires-Dist: llmada>=1.1.11
  Requires-Dist: mcp[cli]>=1.19.0
+ Requires-Dist: modusched==0.1.5
  Requires-Dist: plotly>=6.3.1
  Requires-Dist: pyyaml>=6.0.3
  Requires-Dist: toml>=0.10.2
{pro_craft-0.1.53 → pro_craft-0.1.55}/pyproject.toml

@@ -1,10 +1,10 @@
  [project]
  name = "pro-craft"
- version = "0.1.53"
+ version = "0.1.55"
  description = "Add your description here"
  readme = "README.md"
  requires-python = ">=3.12"
- dependencies = [ "aiomysql>=0.2.0", "anyio>=4.11.0", "db-help>=0.2.2", "fastapi>=0.119.0", "llmada>=1.1.11", "mcp[cli]>=1.19.0", "plotly>=6.3.1", "pyyaml>=6.0.3", "toml>=0.10.2", "utils-tool==0.1.3", "uvicorn>=0.38.0",]
+ dependencies = [ "aiomysql>=0.2.0", "anyio>=4.11.0", "db-help>=0.2.2", "fastapi>=0.119.0", "llmada>=1.1.11", "mcp[cli]>=1.19.0", "modusched==0.1.5", "plotly>=6.3.1", "pyyaml>=6.0.3", "toml>=0.10.2", "utils-tool==0.1.3", "uvicorn>=0.38.0",]

  [tool.setuptools.package-data]
  pro_craft = [ "config.yaml",]
{pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft/prompt_craft/async_.py

@@ -25,6 +25,7 @@ from datetime import datetime, timedelta
  from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine, async_sessionmaker
  from sqlalchemy import select, and_  # import select and and_
  from sqlalchemy.orm import class_mapper  # used to check whether an object is persisted
+ from datetime import datetime, timedelta

  from tqdm.asyncio import tqdm
  import pandas as pd
@@ -333,6 +334,8 @@ class AsyncIntel():

      async def get_use_case(self,
                             target_prompt_id: str,
+                            start_time: datetime = None,  # new: start of the time window
+                            end_time: datetime = None,    # new: end of the time window
                             session = None
                             ):
          """
@@ -341,6 +344,11 @@ class AsyncIntel():
          stmt = select(UseCase).filter(UseCase.is_deleted == 0,
                                        UseCase.prompt_id == target_prompt_id)

+         if start_time:
+             stmt = stmt.filter(UseCase.created_at >= start_time)  # assumes your UseCase model has a created_at field
+
+         if end_time:
+             stmt = stmt.filter(UseCase.created_at <= end_time)
          result = await session.execute(stmt)
          # use_case = result.scalars().one_or_none()
          use_case = result.scalars().all()
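The commented-out start/end values later in this diff hint at how the new window is meant to be used. A minimal usage sketch, not part of the package, assuming an AsyncIntel instance and the create_async_session helper that this diff imports from pro_craft.utils:

from datetime import datetime

from pro_craft import AsyncIntel
from pro_craft.utils import create_async_session

async def recent_use_cases(intels: AsyncIntel, prompt_id: str):
    # Fetch only the use cases created inside a fixed window; both bounds are
    # optional, so passing None keeps the old "all use cases" behaviour.
    async with create_async_session(intels.engine) as session:
        return await intels.get_use_case(
            target_prompt_id=prompt_id,
            start_time=datetime(2023, 1, 1, 10, 0, 0),
            end_time=datetime(2023, 1, 15, 12, 30, 0),
            session=session,
        )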
@@ -665,8 +673,7 @@ class AsyncIntel():
              output_format = ""

          if logger:
-             logger.info(f'{type(input_data)} $ intellect输入 input_data $ {input_data}')
-             logger.info(f'{type(output_format)} $ intellect输入 output_format $ {output_format}')
+             logger.info(f'{type(input_data)} $ intellect-输入 $ {input_data} \noutput_format: \n{output_format}')

          ai_result = await self.intellect(
              input_data=input_data,
@@ -683,12 +690,13 @@ class AsyncIntel():

          except JSONDecodeError as e:
              try:
-                 self.logger.error(f"尝试补救")
+                 self.logger.error(f'{type(json_str)} $ intellect尝试补救 $ {json_str}')
                  json_str = fix_broken_json_string(json_str)
                  ai_result = json.loads(json_str)
                  OutputFormat(**ai_result)

              except JSONDecodeError as e:
+                 self.logger.error(f'{type(json_str)} $ {prompt_id}intellect生成的内容为无法被Json解析 $ {json_str}')
                  raise IntellectRemoveFormatError(f"prompt_id: {prompt_id} 生成的内容为无法被Json解析 {e}") from e

          except ValidationError as e:
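fix_broken_json_string itself is not shown in this diff; the sketch below only illustrates the kind of cleanup such a fallback typically performs (stripping code fences, surrounding text, and trailing commas) before json.loads is retried. The helper name and behaviour here are assumptions, not the package's implementation.

import json
import re

def fix_broken_json_string_sketch(json_str: str) -> str:
    # Illustrative only: NOT pro_craft's fix_broken_json_string.
    s = json_str.strip()
    s = re.sub(r"^```(?:json)?\s*|\s*```$", "", s)   # drop Markdown code fences
    start, end = s.find("{"), s.rfind("}")
    if start != -1 and end != -1:
        s = s[start:end + 1]                         # keep only the outermost object
    s = re.sub(r",\s*([}\]])", r"\1", s)             # remove trailing commas
    return s

broken = '```json\n{"title": "demo", "tags": ["a", "b",],}\n```'
print(json.loads(fix_broken_json_string_sketch(broken)))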
@@ -791,6 +799,122 @@ class AsyncIntel():
              if prompt_result.action_type != "inference":
                  raise IntellectRemoveError("请在inference模式下使用次类")

+         if database_url:
+             eval_engine = create_async_engine(database_url, echo=False,
+                                               pool_size=10,        # connections kept in the pool
+                                               max_overflow=20,     # extra connections allowed when pool_size is exhausted
+                                               pool_recycle=3600,   # recycle connections every hour
+                                               pool_pre_ping=True,  # check connection liveness before use
+                                               pool_timeout=30      # max seconds to wait for a pooled connection
+                                               )
+         else:
+             eval_engine = self.engine
+         async with create_async_session(eval_engine) as eval_session:
+             # start = datetime(2023, 1, 1, 10, 0, 0)
+             # end = datetime(2023, 1, 15, 12, 30, 0)
+             use_cases = await self.get_use_case(target_prompt_id=prompt_id,session=eval_session,
+                                                 start_time=None,
+                                                 end_time=None,)
+
+             total_assertions = len(use_cases)
+             result_cases = []
+
+             async def evals_func(use_case,prompt_id,OutputFormat,ExtraFormats,version):
+                 try:
+
+                     # pass the parameters through here
+                     ai_result = await self.intellect_format(
+                         input_data = use_case.use_case,
+                         prompt_id = prompt_id,
+                         OutputFormat = OutputFormat,
+                         ExtraFormats = ExtraFormats,
+                         version = version,
+                         inference_save_case = False,
+                         ConTent_Function = ConTent_Function,
+                         AConTent_Function = AConTent_Function,
+                     )
+
+                     result_cases.append({"type":"Successful","case":use_case.use_case,"reply":f"pass"})
+                     use_case.output = json.dumps(ai_result,ensure_ascii=False,indent=4)
+
+
+                 except IntellectRemoveFormatError as e:
+                     result_cases.append({"type":"FAILED","case":use_case.use_case,"reply":f"{e}"})
+                     use_case.output = f"{"FAILED"}-{e}"
+                     use_case.faired_time +=1
+
+                 except Exception as e:  # catch other possible errors
+                     result_cases.append({"type":"FAILED","case":use_case.use_case,"reply":f"Exp {e}"})
+                     use_case.output = f"{"FAILED"}-{e}"
+                     use_case.faired_time +=1
+
+
+             tasks = []
+             for use_case in use_cases:
+                 tasks.append(
+                     evals_func(
+                         use_case = use_case,
+                         prompt_id = prompt_id,
+                         OutputFormat = OutputFormat,
+                         ExtraFormats = ExtraFormats,
+                         version = version
+                     )
+                 )
+             await tqdm.gather(*tasks,total=len(tasks))
+             # await asyncio.gather(*tasks, return_exceptions=False)
+
+             await eval_session.commit()
+
+             successful_assertions = 0
+             bad_case = []
+             for i in result_cases:
+                 if i['type'] == "Successful":
+                     successful_assertions += 1
+                 else:
+                     bad_case.append(i)
+
+             success_rate = (successful_assertions / total_assertions) * 100
+
+
+             if success_rate >= MIN_SUCCESS_RATE:
+                 self.eval_df.loc[len(self.eval_df)] = {"name":prompt_id,
+                                                        'status':"通过",
+                                                        "score":success_rate,
+                                                        "total":str(total_assertions),
+                                                        "bad_case":json.dumps(bad_case,ensure_ascii=False)}
+                 return "通过", success_rate, str(total_assertions), json.dumps(bad_case,ensure_ascii=False),
+             else:
+                 self.eval_df.loc[len(self.eval_df)] = {"name":prompt_id,
+                                                        'status':"未通过",
+                                                        "score":success_rate,
+                                                        "total":str(total_assertions),
+                                                        "bad_case":json.dumps(bad_case,ensure_ascii=False)}
+                 return "未通过",success_rate, str(total_assertions), json.dumps(bad_case,ensure_ascii=False),
+
+
+     async def function_eval(self,
+                             OutputFormat: object,
+                             prompt_id: str,
+                             database_url = None,
+                             ExtraFormats: list[object] = [],
+                             version: str = None,
+                             MIN_SUCCESS_RATE = 80.0,
+                             ConTent_Function = None,
+                             AConTent_Function = None,
+                             ):
+         """
+         ConTent_Function:
+             # TODO human-judged eval
+             # TODO LLM-judged eval
+         """
+         async with create_async_session(self.engine) as session:
+             prompt_result = await self.get_prompt_safe(prompt_id=prompt_id,
+                                                        session=session)
+             if prompt_result is None:
+                 raise IntellectRemoveError("不存在的prompt_id")
+             if prompt_result.action_type != "inference":
+                 raise IntellectRemoveError("请在inference模式下使用次类")
+
          if database_url:
              eval_engine = create_async_engine(database_url, echo=False,
                                                pool_size=10,  # connections kept in the pool
@@ -879,6 +1003,7 @@ class AsyncIntel():
                                                         "bad_case":json.dumps(bad_case,ensure_ascii=False)}
                  return "未通过",success_rate, str(total_assertions), json.dumps(bad_case,ensure_ascii=False),

+
      def draw_data(self,save_html_path = ""):
          df = self.eval_df
          # --- visualization section ---
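Taken together, these additions give AsyncIntel a second evaluation entry point, function_eval, which replays stored use cases through intellect_format concurrently (via tqdm.gather), records pass/fail rows in self.eval_df, and returns a status, the success rate, the case count, and the failing cases as JSON. A hypothetical driver under assumed names follows; the connection string, model name, and output model are placeholders, and a Pydantic-style model is assumed because the diff validates with OutputFormat(**ai_result).

import asyncio
from pydantic import BaseModel

from pro_craft import AsyncIntel

class SummaryOutput(BaseModel):   # placeholder output schema
    title: str
    summary: str

async def main():
    intels = AsyncIntel(
        database_url="mysql+aiomysql://user:pass@localhost/procraft",  # placeholder
        model_name="my-model",                                         # placeholder
    )
    status, success_rate, total, bad_cases = await intels.function_eval(
        OutputFormat=SummaryOutput,
        prompt_id="memorycard-format",   # one of the prompt ids registered further down in this diff
        MIN_SUCCESS_RATE=80.0,
    )
    print(status, success_rate, total, bad_cases)
    intels.draw_data(save_html_path="eval_report.html")

asyncio.run(main())

When no database_url is passed, function_eval falls back to self.engine, so the evaluation runs against the same database the instance was created with.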
{pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pro-craft
- Version: 0.1.53
+ Version: 0.1.55
  Summary: Add your description here
  Requires-Python: >=3.12
  Description-Content-Type: text/markdown
@@ -10,6 +10,7 @@ Requires-Dist: db-help>=0.2.2
  Requires-Dist: fastapi>=0.119.0
  Requires-Dist: llmada>=1.1.11
  Requires-Dist: mcp[cli]>=1.19.0
+ Requires-Dist: modusched==0.1.5
  Requires-Dist: plotly>=6.3.1
  Requires-Dist: pyyaml>=6.0.3
  Requires-Dist: toml>=0.10.2
{pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft.egg-info/SOURCES.txt

@@ -21,7 +21,6 @@ src/pro_craft/server/mcp/models.py
  src/pro_craft/server/mcp/prompt.py
  src/pro_craft/server/router/__init__.py
  src/pro_craft/server/router/models.py
- src/pro_craft/server/router/prompt copy.py
  src/pro_craft/server/router/prompt.py
  tests/test22.py
  tests/test_11.py
{pro_craft-0.1.53 → pro_craft-0.1.55}/src/pro_craft.egg-info/requires.txt

@@ -4,6 +4,7 @@ db-help>=0.2.2
  fastapi>=0.119.0
  llmada>=1.1.11
  mcp[cli]>=1.19.0
+ modusched==0.1.5
  plotly>=6.3.1
  pyyaml>=6.0.3
  toml>=0.10.2
pro_craft-0.1.53/src/pro_craft/server/router/prompt copy.py (deleted in 0.1.55)

@@ -1,183 +0,0 @@
-
-
- from fastapi import APIRouter
- from pro_craft import Intel,AsyncIntel
- from pro_craft.utils import create_async_session
- from fastapi import FastAPI, HTTPException
- from .models import *
- from fastapi import APIRouter, Depends, HTTPException, status, Header
- import os
-
-
- def create_router(database_url: str,
-                   slave_database_url: str,
-                   model_name: str,
-                   logger = None):
-     """
-     # TODO reorganize as async
-     Create a FastAPI APIRouter instance containing the ProCraft routes.
-
-     Args:
-         database_url (str): database connection string.
-         model_name (str): model name used for the Intel instance.
-         api_key_secret (str, optional): secret string used to validate the API key.
-             If provided, it overrides the PRO_CRAFT_API_KEY environment variable.
-             If neither is provided, the hard-coded 'your_default_secret_key' is used.
-     Returns:
-         APIRouter: the configured FastAPI APIRouter instance.
-     """
-
-     intels = AsyncIntel(
-         database_url=database_url,
-         model_name=model_name,
-         logger=logger
-     )
-
-     async def verify_api_key(authorization: Optional[str] = Header(None)):
-         # if not authorization:
-         #     raise HTTPException(status_code=401, detail="Invalid authorization scheme")
-         # if not authorization.startswith("Bearer "):
-         #     raise HTTPException(status_code=401, detail="Invalid authorization scheme")
-
-         # token = authorization.split(" ")[1]
-
-         key = os.getenv("prompt_api_key")
-         key_ = "123578"
-         if key_ != key:
-             raise HTTPException(status_code=401, detail="Error Server Position2")
-
-     router = APIRouter(
-         tags=["prompt"],  # Depends here ensures every request is verified
-         dependencies = [Depends(verify_api_key)]
-     )
-
-     # automatic modification
-     @router.post("/push_order",
-                  description="可选 train,inference,summary,finetune,patch",
-                  response_model=PromptResponse,
-                  )
-     async def push_order(request: PushOrderRequest):
-         try:
-             result = await intels.push_action_order(
-                 demand=request.demand,
-                 prompt_id=request.prompt_id,
-                 action_type=request.action_type
-             )
-             return PromptResponse(msg = "success",content=result)
-         except Exception as e:
-             raise HTTPException(
-                 status_code=500, detail=f"{e}"
-             )
-
-     # manual intervention
-
-     @router.get("/registered_prompt",
-                 description="获取以注册的提示词",
-                 response_model=PromptResponse)
-     async def registered_prompt():
-         try:
-             result = ["memorycard-format",
-                       "memorycard-polish",
-                       "memorycard-merge",
-                       "memorycard-score",
-                       "memorycard-generate-content",
-                       "user-overview",
-                       "user-relationship-extraction",
-                       "avatar-brief",
-                       "avatar-personality-extraction",
-                       "avatar-desensitization",
-                       ""
-                       "biograph-free-writer",
-                       "biograph-paid-title",
-                       "biograph-outline",
-                       "biograph-brief",
-                       "biograph-extract-person-name",
-                       "biograph-extract-place",
-                       "biograph-extract-material",
-                       "biograph_material_add",
-                       "biograph_material_init",
-                       "biograph-writer"]
-
-             return PromptResponse(msg = "success",content=' | '.join(result))
-         except Exception as e:
-             raise HTTPException(
-                 status_code=500, detail=f"{e}"
-             )
-     @router.post("/get_prompt",
-                  description="获得现行提示词",
-                  response_model=PromptResponse)
-     async def get_prompt(request: GetPromptRequest):
-         try:
-             async with create_async_session(intels.engine) as session:
-                 result = await intels.get_prompt_safe(
-                     prompt_id=request.prompt_id,
-                     version = request.version,
-                     session=session
-                 )
-             return PromptResponse(msg = "success",content={"prompt": result.prompt, "version": result.version})
-
-         except Exception as e:
-             raise HTTPException(
-                 status_code=500, detail=f"{e}"
-             )
-
-     @router.post("/update_prompt",
-                  description="更新现行提示词",
-                  response_model=PromptResponse)
-     async def update_prompt(request: UpdatePromptRequest):
-         try:
-             async with create_async_session(intels.engine) as session:
-                 await intels.save_prompt(
-                     prompt_id = request.prompt_id,
-                     new_prompt = request.prompt,
-                     use_case = "",
-                     action_type = "inference",
-                     demand = "上传",
-                     score = 70,
-                     session = session)
-             return PromptResponse(msg = "success",content="")
-         except Exception as e:
-             raise HTTPException(
-                 status_code=500, detail=f"{e}"
-             )
-
-     @router.post("/rollback_prompt",
-                  description="回滚现行提示词",
-                  response_model=PromptResponse)
-     async def roll_back(request: RollBackPromptRequest):
-         try:
-             async with create_async_session(intels.engine) as session:
-                 result = await intels.get_prompt_safe(
-                     prompt_id=request.prompt_id,
-                     version = request.version,
-                     session=session
-                 )
-                 assert result.version == request.version
-                 await intels.save_prompt(
-                     prompt_id = request.prompt_id,
-                     new_prompt = result.prompt,
-                     use_case = result.use_case,
-                     action_type = "inference",
-                     demand = "",
-                     score = 61,
-                     session = session)
-             return PromptResponse(msg = "success",content="")
-         except Exception as e:
-             raise HTTPException(
-                 status_code=500, detail=f"{e}"
-             )
-
-
-     # system-level services
-
-     @router.post("/sync_database")
-     async def sync_database():
-         try:
-             result = await intels.sync_production_database(slave_database_url)
-             return PromptResponse(msg = "success",content="")
-         except Exception as e:
-             raise HTTPException(
-                 status_code=500, detail=f"{e}"
-             )
-     return router
-
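The deleted module was an older duplicate of src/pro_craft/server/router/prompt.py, which remains in the package. Assuming the retained module exposes the same create_router factory shown above (an assumption of this sketch), mounting it on a FastAPI app would look roughly like this; the connection strings and model name are placeholders.

import uvicorn
from fastapi import FastAPI

from pro_craft.server.router.prompt import create_router  # retained module, assumed to match the deleted copy

app = FastAPI()
app.include_router(
    create_router(
        database_url="mysql+aiomysql://user:pass@primary/procraft",        # placeholder
        slave_database_url="mysql+aiomysql://user:pass@replica/procraft",  # placeholder
        model_name="my-model",                                             # placeholder
    ),
    prefix="/prompt",
)

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)

Note that the verify_api_key dependency in the deleted copy compares os.getenv("prompt_api_key") against a hard-coded value, so requests are rejected with HTTP 401 unless that environment variable is set to match.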