pro-craft 0.1.36__tar.gz → 0.1.38__tar.gz

This diff shows the changes between two publicly released versions of this package. The information in this diff is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.

Potentially problematic release.


This version of pro-craft might be problematic; see the registry's advisory page for more details.

Files changed (30)
  1. {pro_craft-0.1.36 → pro_craft-0.1.38}/PKG-INFO +1 -1
  2. {pro_craft-0.1.36 → pro_craft-0.1.38}/pyproject.toml +1 -1
  3. {pro_craft-0.1.36 → pro_craft-0.1.38}/src/pro_craft/log.py +1 -1
  4. {pro_craft-0.1.36 → pro_craft-0.1.38}/src/pro_craft/prompt_craft/async_.py +58 -35
  5. pro_craft-0.1.38/src/pro_craft/server/router/models.py +35 -0
  6. pro_craft-0.1.38/src/pro_craft/server/router/prompt.py +132 -0
  7. {pro_craft-0.1.36 → pro_craft-0.1.38}/src/pro_craft.egg-info/PKG-INFO +1 -1
  8. {pro_craft-0.1.36 → pro_craft-0.1.38}/src/pro_craft.egg-info/SOURCES.txt +1 -0
  9. pro_craft-0.1.36/src/pro_craft/server/router/prompt.py +0 -80
  10. {pro_craft-0.1.36 → pro_craft-0.1.38}/README.md +0 -0
  11. {pro_craft-0.1.36 → pro_craft-0.1.38}/setup.cfg +0 -0
  12. {pro_craft-0.1.36 → pro_craft-0.1.38}/src/pro_craft/__init__.py +0 -0
  13. {pro_craft-0.1.36 → pro_craft-0.1.38}/src/pro_craft/code_helper/coder.py +0 -0
  14. {pro_craft-0.1.36 → pro_craft-0.1.38}/src/pro_craft/code_helper/designer.py +0 -0
  15. {pro_craft-0.1.36 → pro_craft-0.1.38}/src/pro_craft/database.py +0 -0
  16. {pro_craft-0.1.36 → pro_craft-0.1.38}/src/pro_craft/file_manager.py +0 -0
  17. {pro_craft-0.1.36 → pro_craft-0.1.38}/src/pro_craft/prompt_craft/__init__.py +0 -0
  18. {pro_craft-0.1.36 → pro_craft-0.1.38}/src/pro_craft/prompt_craft/new.py +0 -0
  19. {pro_craft-0.1.36 → pro_craft-0.1.38}/src/pro_craft/prompt_craft/sync.py +0 -0
  20. {pro_craft-0.1.36 → pro_craft-0.1.38}/src/pro_craft/server/mcp/__init__.py +0 -0
  21. {pro_craft-0.1.36 → pro_craft-0.1.38}/src/pro_craft/server/mcp/prompt.py +0 -0
  22. {pro_craft-0.1.36 → pro_craft-0.1.38}/src/pro_craft/server/router/__init__.py +0 -0
  23. {pro_craft-0.1.36 → pro_craft-0.1.38}/src/pro_craft/utils.py +0 -0
  24. {pro_craft-0.1.36 → pro_craft-0.1.38}/src/pro_craft.egg-info/dependency_links.txt +0 -0
  25. {pro_craft-0.1.36 → pro_craft-0.1.38}/src/pro_craft.egg-info/requires.txt +0 -0
  26. {pro_craft-0.1.36 → pro_craft-0.1.38}/src/pro_craft.egg-info/top_level.txt +0 -0
  27. {pro_craft-0.1.36 → pro_craft-0.1.38}/tests/test22.py +0 -0
  28. {pro_craft-0.1.36 → pro_craft-0.1.38}/tests/test_11.py +0 -0
  29. {pro_craft-0.1.36 → pro_craft-0.1.38}/tests/test_coder.py +0 -0
  30. {pro_craft-0.1.36 → pro_craft-0.1.38}/tests/test_designer.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pro-craft
3
- Version: 0.1.36
3
+ Version: 0.1.38
4
4
  Summary: Add your description here
5
5
  Requires-Python: >=3.12
6
6
  Description-Content-Type: text/markdown
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "pro-craft"
3
- version = "0.1.36"
3
+ version = "0.1.38"
4
4
  description = "Add your description here"
5
5
  readme = "README.md"
6
6
  requires-python = ">=3.12"
@@ -12,7 +12,7 @@ class Log:
12
12
  def __init__(self, console_level = logging.INFO, log_file_name="app.log"):
13
13
  self.Console_LOG_LEVEL = console_level
14
14
  self.log_file_name = log_file_name
15
- os.makedirs("logs", exist_ok=False)
15
+ os.makedirs("logs", exist_ok=True)
16
16
  self.LOG_FILE_PATH = os.path.join("logs", log_file_name)
17
17
  self.logger = self.get_logger()
18
18
  self.super_log_level = self.logger.critical
@@ -25,14 +25,14 @@ from datetime import datetime, timedelta
25
25
  from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine, async_sessionmaker
26
26
  from sqlalchemy import select, and_ # 引入 select 和 and_
27
27
  from sqlalchemy.orm import class_mapper # 用于检查对象是否是持久化的
28
- import tqdm
28
+
29
29
  from tqdm.asyncio import tqdm
30
30
  import pandas as pd
31
31
  import plotly.graph_objects as go
32
32
  from pro_craft import super_log
33
33
 
34
34
  BATCH_SIZE = int(os.getenv("DATABASE_SYNC_BATCH_SIZE",100))
35
-
35
+ print(12)
36
36
 
37
37
  def fix_broken_json_string(broken_json_str):
38
38
  # 移除 BOM
@@ -364,7 +364,7 @@ class AsyncIntel():
364
364
  session = session)
365
365
  for use_case_old in use_cases:
366
366
  if use_case == use_case_old.use_case:
367
- print("用例已经存在")
367
+ # print("用例已经存在")
368
368
  return
369
369
 
370
370
  use_case = UseCase(prompt_id=prompt_id,
@@ -642,7 +642,7 @@ class AsyncIntel():
642
642
  使用以下正则检出
643
643
  "```json([\s\S]*?)```"
644
644
  使用以下方式验证
645
- """
645
+ """
646
646
  if OutputFormat:
647
647
  output_format = base_format_prompt + "\n".join([inspect.getsource(outputformat) for outputformat in ExtraFormats]) + inspect.getsource(OutputFormat)
648
648
  else:
@@ -654,36 +654,34 @@ class AsyncIntel():
654
654
  version=version,
655
655
  inference_save_case=inference_save_case,
656
656
  )
657
-
658
- try:
659
- json_str = extract_(ai_result,r'json')
660
- ai_result = json.loads(json_str)
661
- if OutputFormat:
662
- OutputFormat(**ai_result)
663
-
664
- except JSONDecodeError as e:
657
+ if OutputFormat:
665
658
  try:
666
- self.logger.error(f"尝试补救")
667
- json_str = fix_broken_json_string(json_str)
659
+ json_str = extract_(ai_result,r'json')
668
660
  ai_result = json.loads(json_str)
669
- if OutputFormat:
670
- OutputFormat(**ai_result)
661
+ OutputFormat(**ai_result)
671
662
 
672
663
  except JSONDecodeError as e:
673
- raise IntellectRemoveFormatError(f"prompt_id: {prompt_id} 生成的内容为无法被Json解析 {e}") from e
674
-
675
- except ValidationError as e:
676
- err_info = e.errors()[0]
677
- raise IntellectRemoveFormatError(f"{err_info["type"]}: 属性:{err_info['loc']}, 发生了如下错误: {err_info['msg']}, 格式校验失败, 当前输入为: {err_info['input']} 请检查") from e
664
+ try:
665
+ self.logger.error(f"尝试补救")
666
+ json_str = fix_broken_json_string(json_str)
667
+ ai_result = json.loads(json_str)
668
+ OutputFormat(**ai_result)
669
+
670
+ except JSONDecodeError as e:
671
+ raise IntellectRemoveFormatError(f"prompt_id: {prompt_id} 生成的内容为无法被Json解析 {e}") from e
672
+
673
+ except ValidationError as e:
674
+ err_info = e.errors()[0]
675
+ raise IntellectRemoveFormatError(f"{err_info["type"]}: 属性:{err_info['loc']}, 发生了如下错误: {err_info['msg']}, 格式校验失败, 当前输入为: {err_info['input']} 请检查") from e
678
676
 
679
- except Exception as e:
680
- raise Exception(f"Error {prompt_id} : {e}") from e
677
+ except Exception as e:
678
+ raise Exception(f"Error {prompt_id} : {e}") from e
681
679
 
682
680
  return ai_result
683
681
 
684
682
  async def intellect_formats(self,
685
683
  input_datas: list[dict | str],
686
- OutputFormat: object,
684
+ OutputFormat: object | None,
687
685
  prompt_id: str,
688
686
  ExtraFormats: list[object] = [],
689
687
  version: str = None,
@@ -719,8 +717,7 @@ class AsyncIntel():
719
717
  # 修改逻辑
720
718
  assert kwargs.get('input_data') # 要求一定要有data入参
721
719
  input_data = kwargs.get('input_data')
722
- assert kwargs.get('OutputFormat') # 要求一定要有data入参
723
- OutputFormat = kwargs.get('OutputFormat')
720
+ OutputFormat = kwargs.get('OutputFormat','')
724
721
 
725
722
  if isinstance(input_data,dict):
726
723
  input_ = output_ = json.dumps(input_data,ensure_ascii=False)
@@ -743,10 +740,12 @@ class AsyncIntel():
743
740
  async def intellect_format_eval(self,
744
741
  OutputFormat: object,
745
742
  prompt_id: str,
743
+ database_url = None,
746
744
  ExtraFormats: list[object] = [],
747
745
  version: str = None,
748
746
  MIN_SUCCESS_RATE = 80.0,
749
747
  ConTent_Function = None,
748
+ AConTent_Function = None,
750
749
  ):
751
750
  """
752
751
  ConTent_Function:
@@ -754,20 +753,32 @@ class AsyncIntel():
754
753
  # TODO llm 评价 eval
755
754
  """
756
755
  async with create_async_session(self.engine) as session:
757
- use_cases = await self.get_use_case(target_prompt_id=prompt_id,session=session)
758
756
  prompt_result = await self.get_prompt_safe(prompt_id=prompt_id,
759
757
  session=session)
760
758
  if prompt_result is None:
761
759
  raise IntellectRemoveError("不存在的prompt_id")
762
760
  if prompt_result.action_type != "inference":
763
761
  raise IntellectRemoveError("请在inference模式下使用次类")
764
-
762
+
763
+ if database_url:
764
+ eval_engine = create_async_engine(database_url, echo=False,
765
+ pool_size=10, # 连接池中保持的连接数
766
+ max_overflow=20, # 当pool_size不够时,允许临时创建的额外连接数
767
+ pool_recycle=3600, # 每小时回收一次连接
768
+ pool_pre_ping=True, # 使用前检查连接活性
769
+ pool_timeout=30 # 等待连接池中连接的最长时间(秒)
770
+ )
771
+ else:
772
+ eval_engine = self.engine
773
+ async with create_async_session(eval_engine) as eval_session:
774
+ use_cases = await self.get_use_case(target_prompt_id=prompt_id,session=eval_session)
765
775
 
766
776
  total_assertions = len(use_cases)
767
777
  result_cases = []
768
778
 
769
779
  async def evals_func(use_case,prompt_id,OutputFormat,ExtraFormats,version):
770
780
  try:
781
+
771
782
  # 这里将参数传入
772
783
  ai_result = await self.intellect_format(
773
784
  input_data = use_case.use_case,
@@ -778,16 +789,25 @@ class AsyncIntel():
778
789
  inference_save_case = False,
779
790
  )
780
791
  if ConTent_Function:
781
- ConTent_Function()
792
+ ConTent_Function(ai_result)
793
+
794
+ if AConTent_Function:
795
+ await AConTent_Function(ai_result)
796
+
782
797
  result_cases.append({"type":"Successful","case":use_case.use_case,"reply":f"pass"})
783
- use_case.output = ai_result
798
+ use_case.output = json.dumps(ai_result,ensure_ascii=False,indent=4)
799
+
800
+
784
801
  except IntellectRemoveFormatError as e:
785
802
  result_cases.append({"type":"FAILED","case":use_case.use_case,"reply":f"{e}"})
786
803
  use_case.output = f"{"FAILED"}-{e}"
804
+ use_case.faired_time +=1
805
+
787
806
  except Exception as e: # 捕获其他可能的错误
788
807
  result_cases.append({"type":"FAILED","case":use_case.use_case,"reply":f"Exp {e}"})
789
808
  use_case.output = f"{"FAILED"}-{e}"
790
- await session.commit()
809
+ use_case.faired_time +=1
810
+
791
811
 
792
812
  tasks = []
793
813
  for use_case in use_cases:
@@ -803,6 +823,7 @@ class AsyncIntel():
803
823
  await tqdm.gather(*tasks,total=len(tasks))
804
824
  # await asyncio.gather(*tasks, return_exceptions=False)
805
825
 
826
+ await eval_session.commit()
806
827
 
807
828
  successful_assertions = 0
808
829
  bad_case = []
@@ -821,7 +842,7 @@ class AsyncIntel():
821
842
 
822
843
 
823
844
 
824
- def draw_data(self):
845
+ def draw_data(self,save_html_path):
825
846
  df = self.eval_df
826
847
  # --- 可视化部分 ---
827
848
  fig = go.Figure()
@@ -902,18 +923,20 @@ class AsyncIntel():
902
923
  )
903
924
 
904
925
  fig.show()
905
- pass
926
+ if save_html_path:
927
+ fig.write_html(save_html_path)
906
928
 
907
- async def _evals(self,prompt_id, OutputFormat, ExtraFormats_list = [],**kwargs):
929
+ async def _evals(self,prompt_id, OutputFormat, ExtraFormats_list = [],database_url = None,**kwargs):
908
930
 
909
931
  status,score, total, bad_case = await self.intellect_format_eval(
910
932
  prompt_id=prompt_id,
911
933
  OutputFormat = OutputFormat,
912
934
  ExtraFormats = ExtraFormats_list,
913
935
  version = None,
936
+ database_url = database_url,
914
937
  **kwargs
915
938
  )
916
- self.df.loc[len(self.df)] = {"name":prompt_id,
939
+ self.eval_df.loc[len(self.eval_df)] = {"name":prompt_id,
917
940
  'status':status,"score":score,
918
941
  "total":total,"bad_case":bad_case}
919
942
 
@@ -0,0 +1,35 @@
1
+ from typing import Dict, Any, Optional, List
2
+ from pydantic import BaseModel, Field, model_validator, field_validator, RootModel
3
+ import re
4
+
5
+ class PushOrderRequest(BaseModel):
6
+ demand: str = Field(None, description="信息")
7
+ prompt_id: str = Field(..., description="提示词id")
8
+ action_type: str = Field(..., description="执行动作",min_length=1, max_length=10)
9
+
10
+ @field_validator('action_type')
11
+ @classmethod
12
+ def validate_action_type(cls, v: str) -> str:
13
+ if v in ['train','inference','summary','finetune','patch']:
14
+ return v
15
+ else:
16
+ raise ValueError(f"无效action_type: {v}")
17
+
18
+ class GetPromptRequest(BaseModel):
19
+ prompt_id: str = Field(..., description="提示词id")
20
+
21
+ class UpdatePromptRequest(BaseModel):
22
+ prompt_id: str = Field(..., description="提示词id")
23
+ prompt: str = Field(..., description="新的提示词")
24
+
25
+ class RollBackPromptRequest(BaseModel):
26
+ prompt_id: str = Field(..., description="提示词id")
27
+ version: str = Field(..., description="版本号")
28
+
29
+ class SyncDataBaseRequest(BaseModel):
30
+ slave_database_url: str = Field(None, description="从属数据库url")
31
+
32
+
33
+ class PromptResponse(BaseModel):
34
+ msg: str = Field(..., description="信息")
35
+ content: str = None
@@ -0,0 +1,132 @@
1
+
2
+
3
+ from fastapi import APIRouter
4
+ from pro_craft import Intel,AsyncIntel
5
+ from pro_craft.utils import create_async_session
6
+ from .models import *
7
+
8
+ def create_router(database_url: str,
9
+ slave_database_url: str,
10
+ model_name: str,
11
+ logger = None):
12
+ """
13
+ # TODO 整理改为异步
14
+ 创建一个包含 ProCraft 路由的 FastAPI APIRouter 实例。
15
+
16
+ Args:
17
+ database_url (str): 数据库连接字符串。
18
+ model_name (str): 用于 Intel 实例的模型名称。
19
+ api_key_secret (str, optional): 用于验证 API Key 的秘密字符串。
20
+ 如果提供,它将覆盖环境变量 PRO_CRAFT_API_KEY。
21
+ 如果都不提供,会使用硬编码的 'your_default_secret_key'。
22
+ Returns:
23
+ APIRouter: 配置好的 FastAPI APIRouter 实例。
24
+ """
25
+
26
+ intels = AsyncIntel(
27
+ database_url=database_url,
28
+ model_name=model_name,
29
+ logger=logger
30
+ )
31
+
32
+ router = APIRouter(
33
+ tags=["prompt"] # 这里使用 Depends 确保每次请求都验证
34
+ )
35
+
36
+ # 自动修改
37
+ @router.post("/push_order",
38
+ description="可选 train,inference,summary,finetune,patch",
39
+ response_model=PromptResponse,
40
+ )
41
+ async def push_order(request: PushOrderRequest):
42
+ result = await intels.push_action_order(
43
+ demand=request.demand,
44
+ prompt_id=request.prompt_id,
45
+ action_type=request.action_type
46
+ )
47
+ return PromptResponse(msg = "success",content=result)
48
+
49
+ # 人为干预
50
+
51
+ @router.get("/registered_prompt",
52
+ description="获取以注册的提示词",
53
+ response_model=PromptResponse)
54
+ async def get_prompt():
55
+ result = ["memorycard-format",
56
+ "memorycard-polish",
57
+ "memorycard-merge",
58
+ "memorycard-score",
59
+ "memorycard-generate-content",
60
+ "user-overview",
61
+ "user-relationship-extraction",
62
+ "avatar-brief",
63
+ "avatar-personality-extraction",
64
+ "avatar-desensitization",
65
+ "biograph-free-writer",
66
+ "biograph-paid-title",
67
+ "biograph-outline",
68
+ "biograph-brief",
69
+ "biograph-extract-person-name",
70
+ "biograph-extract-place",
71
+ "biograph-extract-material",
72
+ "biograph-writer"]
73
+
74
+ return PromptResponse(msg = "success",content=result)
75
+
76
+ @router.post("/get_prompt",
77
+ description="获得现行提示词",
78
+ response_model=PromptResponse)
79
+ async def get_prompt(request: GetPromptRequest):
80
+ async with create_async_session(intels.engine) as session:
81
+ result = await intels.get_prompt_safe(
82
+ prompt_id=request.prompt_id,
83
+ session=session
84
+ )
85
+ return PromptResponse(msg = "success",content=result)
86
+
87
+ @router.post("/update_prompt",
88
+ description="更新现行提示词",
89
+ response_model=PromptResponse)
90
+ async def update_prompt(request: UpdatePromptRequest):
91
+ async with create_async_session(intels.engine) as session:
92
+ await intels.save_prompt(
93
+ prompt_id = request.prompt_id,
94
+ new_prompt = request.prompt,
95
+ use_case = "",
96
+ action_type = "inference",
97
+ demand = "上传",
98
+ score = 70,
99
+ session = session)
100
+ return PromptResponse(msg = "success",content=None)
101
+
102
+ @router.post("/rollback_prompt",
103
+ description="回滚现行提示词",
104
+ response_model=PromptResponse)
105
+ async def roll_back(request: RollBackPromptRequest):
106
+ async with create_async_session(intels.engine) as session:
107
+ result = await intels.get_prompt_safe(
108
+ prompt_id=request.prompt_id,
109
+ version = request.version,
110
+ session=session
111
+ )
112
+ assert result.version == request.version
113
+ await intels.save_prompt(
114
+ prompt_id = request.prompt_id,
115
+ new_prompt = result.prompt,
116
+ use_case = result.use_case,
117
+ action_type = "inference",
118
+ demand = "",
119
+ score = 61,
120
+ session = session)
121
+ return PromptResponse(msg = "success",content=None)
122
+
123
+ #系统级别服务
124
+
125
+ @router.post("/sync_database")
126
+ async def sync_database(request: SyncDataBaseRequest):
127
+ slave_database_url = request.slave_database_url or slave_database_url
128
+ result = await intels.sync_production_database(slave_database_url)
129
+ return PromptResponse(msg = "success",content=result)
130
+
131
+ return router
132
+
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pro-craft
3
- Version: 0.1.36
3
+ Version: 0.1.38
4
4
  Summary: Add your description here
5
5
  Requires-Python: >=3.12
6
6
  Description-Content-Type: text/markdown
@@ -19,6 +19,7 @@ src/pro_craft/prompt_craft/sync.py
19
19
  src/pro_craft/server/mcp/__init__.py
20
20
  src/pro_craft/server/mcp/prompt.py
21
21
  src/pro_craft/server/router/__init__.py
22
+ src/pro_craft/server/router/models.py
22
23
  src/pro_craft/server/router/prompt.py
23
24
  tests/test22.py
24
25
  tests/test_11.py
@@ -1,80 +0,0 @@
1
-
2
-
3
- from fastapi import APIRouter
4
- from pro_craft import Intel,AsyncIntel
5
- from pro_craft.utils import create_async_session
6
-
7
- def create_router(database_url: str,
8
- slave_database_url: str,
9
- model_name: str,
10
- logger = None):
11
- """
12
- # TODO 整理改为异步
13
- 创建一个包含 ProCraft 路由的 FastAPI APIRouter 实例。
14
-
15
- Args:
16
- database_url (str): 数据库连接字符串。
17
- model_name (str): 用于 Intel 实例的模型名称。
18
- api_key_secret (str, optional): 用于验证 API Key 的秘密字符串。
19
- 如果提供,它将覆盖环境变量 PRO_CRAFT_API_KEY。
20
- 如果都不提供,会使用硬编码的 'your_default_secret_key'。
21
- Returns:
22
- APIRouter: 配置好的 FastAPI APIRouter 实例。
23
- """
24
-
25
- intels = AsyncIntel(
26
- database_url=database_url,
27
- model_name=model_name,
28
- logger=logger
29
- )
30
-
31
- router = APIRouter(
32
- tags=["prompt"] # 这里使用 Depends 确保每次请求都验证
33
- )
34
-
35
- @router.get("/push_order",
36
- description="可选 train,inference,summary,finetune,patch",)
37
- async def push_order(demand: str, prompt_id: str, action_type: str = "train"):
38
- result = await intels.push_action_order(
39
- demand=demand,
40
- prompt_id=prompt_id,
41
- action_type=action_type
42
- )
43
- return {"message": "success", "result": result}
44
-
45
- @router.get("/get_latest_prompt")
46
- async def get_latest_prompt(prompt_id: str):
47
- async with create_async_session(intels.engine) as session:
48
- result = await intels.get_prompts_from_sql(
49
- prompt_id=prompt_id,
50
- session=session
51
- )
52
- return {"message": "success", "result": result}
53
-
54
- @router.get("/sync_database")
55
- async def sync_database():
56
- result = await intels.sync_prompt_data_to_database(slave_database_url)
57
- return {"message": "success","result":result}
58
-
59
-
60
- @router.get("/roll_back")
61
- async def roll_back(prompt_id:str,version:str):
62
- async with create_async_session(intels.engine) as session:
63
- result = await intels.get_prompts_from_sql(
64
- prompt_id=prompt_id,
65
- version = version,
66
- session=session
67
- )
68
- assert result.version == version
69
- await intels.save_prompt_increment_version(
70
- prompt_id = prompt_id,
71
- new_prompt = result.prompt,
72
- use_case = result.use_case,
73
- action_type = "inference",
74
- demand = "",
75
- score = 61,
76
- session = session)
77
- return {"message": "success"}
78
-
79
- return router
80
-
File without changes
File without changes
File without changes
File without changes