pro-craft 0.1.12-py3-none-any.whl → 0.1.14-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of pro-craft might be problematic.

pro_craft/__init__.py CHANGED
@@ -0,0 +1,25 @@
+from dotenv import load_dotenv, find_dotenv
+dotenv_path = find_dotenv()
+load_dotenv(".env", override=True)
+
+from .log import Log
+import logging
+Log_ = Log(console_level = logging.WARNING,
+           log_file_name="app.log")
+logger = Log_.logger
+Log_.set_super_log(logger.critical)
+
+super_log = Log_.super_log  # debugging tool
+
+def slog(s, target: str = "target", logger = logger.info):
+    COLOR_RED = "\033[91m"
+    COLOR_GREEN = "\033[92m"
+    COLOR_YELLOW = "\033[93m"
+    COLOR_BLUE = "\033[94m"
+    COLOR_RESET = "\033[0m"  # reset color
+
+    logger("\n" + f"{COLOR_GREEN}=={COLOR_RESET}" * 50)
+    logger(target + "\n " + "--" * 40)
+    logger(type(s))
+    logger(s)
+    logger("\n" + f"{COLOR_GREEN}=={COLOR_RESET}" * 50)
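For orientation, a minimal usage sketch of the new package-level helpers (names taken from the code above; the messages and the dict payload are illustrative only):

# Minimal sketch: the shared logger and debug helpers wired up in pro_craft/__init__.py.
from pro_craft import logger, slog, super_log

logger.warning("something worth noting")          # passes the WARNING console filter and the file handlers
slog({"demand": "example"}, target="payload")     # banner-style dump, routed through logger.info by default
super_log("critical debug dump", target="state")  # banner-style dump, routed through logger.critical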
pro_craft/log.py CHANGED
@@ -2,117 +2,81 @@ import logging
 import os
 from logging.handlers import RotatingFileHandler, TimedRotatingFileHandler

-class Logger:
-    """log
-
-    Returns:
-        _type_: single
-    """
+class Log:
     _instance = None
-
     def __new__(cls, *args, **kwargs):
         if cls._instance is None:
             cls._instance = super().__new__(cls)
         return cls._instance

-    def __init__(self,level = 'debug',log_file_name = "app.log"):
-        """
-        Level: defines how severe a log record is. From low to high:
-        DEBUG: detailed debugging information, normally used only during development.
-        INFO: confirmation that the program is running as expected.
-        WARNING: something unexpected happened or may become a problem, but the program still runs normally.
-        ERROR: a serious error occurred; the program may be unable to perform some functions.
-        CRITICAL: a critical error occurred; the program may be unable to keep running.
-        # --- 1. Define the log level ---
-        # DEBUG is recommended in development, INFO or WARNING in production
-
-        """
-        if not hasattr(self, 'initialized'):
-            self.initialized = True
-
-            if level == 'debug':
-                self.LOG_LEVEL = logging.DEBUG  # development
-            elif level == 'info':
-                self.LOG_LEVEL = logging.INFO  # production
-            elif level == 'warning':
-                self.LOG_LEVEL = logging.WARNING
-            elif level == 'error':
-                self.LOG_LEVEL = logging.ERROR
-            elif level == 'critical':
-                self.LOG_LEVEL = logging.CRITICAL
-            else:
-                self.LOG_LEVEL = logging.INFO  # default level
-
-            # --- 2. Define the log file path and name ---
-            self.LOG_DIR = "logs"
-            self.LOG_FILE_NAME = log_file_name
-            self.LOG_FILE_PATH = os.path.join(self.LOG_DIR, self.LOG_FILE_NAME)
-
-            # Make sure the log directory exists
-            os.makedirs(self.LOG_DIR, exist_ok=True)
-            self.logger = None
-            self.setup_logging()
-            self.env = 'dev'
-
-    def reset_level(self,level = 'debug',env = 'dev'):
-        if level == 'debug':
-            self.LOG_LEVEL = logging.DEBUG  # development
-        elif level == 'info':
-            self.LOG_LEVEL = logging.INFO  # production
-        elif level == 'warning':
-            self.LOG_LEVEL = logging.WARNING
-        elif level == 'error':
-            self.LOG_LEVEL = logging.ERROR
-        elif level == 'critical':
-            self.LOG_LEVEL = logging.CRITICAL
-        else:
-            self.LOG_LEVEL = logging.INFO  # default level
-
-        self.setup_logging()
-        self.env = env
+    def __init__(self, console_level = logging.INFO, log_file_name="app.log"):
+        self.Console_LOG_LEVEL = console_level
+        self.log_file_name = log_file_name
+        self.LOG_FILE_PATH = os.path.join("logs", log_file_name)
+        self.logger = self.get_logger()
+        self.super_log_level = self.logger.critical

-    def setup_logging(self):
-        """# --- 3. Configure the Logger ---
-        """
-        # Get the root logger (a named logger could also be created: logging.getLogger('my_app'))
+    def get_logger(self):
         logger = logging.getLogger()
-        logger.setLevel(self.LOG_LEVEL)
-
-        # Avoid adding handlers more than once (if setup_logging is called repeatedly)
+        # logger.setLevel(self.LOG_LEVEL)
         if not logger.handlers:
             # --- 4. Configure the Formatter ---
-            # Typical format: time - level - logger name - module - line number - message
             formatter = logging.Formatter(
-                '%(asctime)s - %(levelname)s - %(name)s - %(module)s:%(lineno)d - %(message)s'
+                "%(asctime)s - %(levelname)s - %(name)s - %(funcName)s - %(module)s:%(lineno)d - %(message)s"
             )
-
             # --- 5. Configure the Handlers ---

             # 5.1 Console handler (StreamHandler)
             console_handler = logging.StreamHandler()
-            console_handler.setLevel(logging.INFO)  # the console only shows INFO and above
+            console_handler.setLevel(self.Console_LOG_LEVEL)  # the console only shows this level and above
             console_handler.setFormatter(formatter)
             logger.addHandler(console_handler)

-            # 5.2 File handler (RotatingFileHandler or TimedRotatingFileHandler)

+            # Files
+
+            # 5.2 Info-level data
+
+            # RotatingFileHandler: rotates by file size
+            # maxBytes: maximum size in bytes of a single log file (e.g. 10 MB)
+            # backupCount: number of old log files to keep
             file_handler = RotatingFileHandler(
                 self.LOG_FILE_PATH,
-                maxBytes=10 * 1024 * 1024,  # 10 MB
+                maxBytes=10 * 1024 * 1024,  # 10 MB
                 backupCount=5,
-                encoding='utf-8'
+                encoding="utf-8",
             )
-            file_handler.setLevel(self.LOG_LEVEL)  # the file records all logs at the specified level
+            file_handler.setLevel(logging.INFO)  # the file records all logs at the specified level
             file_handler.setFormatter(formatter)
             logger.addHandler(file_handler)

-        self.logger = logger
+            # 5.3 Error/warning log

+            file_handler_debug = RotatingFileHandler(
+                self.LOG_FILE_PATH.replace('.log', '_err.log'),
+                maxBytes=10 * 1024 * 1024,  # 10 MB
+                backupCount=5,
+                encoding="utf-8",
+            )
+            file_handler_debug.setLevel(logging.WARNING)  # the file records all logs at the specified level
+            file_handler_debug.setFormatter(formatter)
+            logger.addHandler(file_handler_debug)
+        return logger
+
+    def set_super_log(self, logger_info):
+        self.super_log_level = logger_info
+
+    def super_log(self, s, target: str = "target"):
+        COLOR_RED = "\033[91m"
+        COLOR_GREEN = "\033[92m"
+        COLOR_YELLOW = "\033[93m"
+        COLOR_BLUE = "\033[94m"
+        COLOR_RESET = "\033[0m"  # reset color
+        log_ = self.super_log_level
+
+        log_("\n" + f"{COLOR_GREEN}=={COLOR_RESET}" * 50)
+        log_(target + "\n " + "--" * 40)
+        log_(type(s))
+        log_(s)
+        log_("\n" + f"{COLOR_GREEN}=={COLOR_RESET}" * 50)

-Log = Logger(log_file_name = "app.log")
-del Logger
-
-"""
-from .log import Log
-logger = Log.logger
-"""
@@ -1,22 +1,46 @@
 from datetime import datetime
 from mcp.server.fastmcp import FastMCP

-# region MCP Weather
-mcp = FastMCP("Weather")
+from pro_craft.prompt_helper import IntellectType, Intel

-@mcp.tool()
-def get_weather(location: str) -> str:
-    return "Cloudy"
+def create_mcp(database_url: str,
+               slave_database_url: str,
+               model_name: str):
+    # region MCP Weather
+    mcp = FastMCP("PromptManager")

-@mcp.tool()
-def get_time() -> str:
-    return datetime.now().strftime('%Y-%m-%d %H:%M:%S')
-# endregion
+    intels = Intel(
+        database_url=database_url,
+        model_name=model_name
+    )

+    @mcp.tool()
+    def push_order(demand: str, prompt_id: str, action_type: str = "train") -> str:
+        result = intels.push_action_order(
+            demand=demand,
+            prompt_id=prompt_id,
+            action_type=action_type
+        )
+        return {"message": "success", "result": result}
+
+    @mcp.tool()
+    def get_latest_prompt(prompt_id: str) -> str:
+        with create_session(intels.engine) as session:
+            result = intels.get_prompts_from_sql(
+                prompt_id=prompt_id,
+                session=session
+            )
+        return {"message": "success", "result": result}

-if __name__ == "__main__":
-    mcp.run(transport="streamable-http")
-    # mcp.run(transport="sse")
-    # search_mcp.run(transport="sse", mount_path="/search")

+    @mcp.tool()
+    def sync_database() -> str:
+        result = intels.sync_prompt_data_to_database(slave_database_url)
+        return {"message": "success", "result": result}
+
+    return mcp

+
+if __name__ == "__main__":
+    mcp = create_mcp()
+    mcp.run(transport="streamable-http")
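A hedged sketch of wiring up the new MCP factory (assuming this hunk belongs to pro_craft/server/mcp/weather.py, the only MCP module listed in the RECORD below; all argument values are placeholders). Note that the __main__ block above calls create_mcp() without the three arguments its signature requires, so a caller would presumably supply them:

# Hypothetical wiring; every value below is a placeholder.
from pro_craft.server.mcp.weather import create_mcp  # assumed module path

mcp = create_mcp(
    database_url="sqlite:///prompts.db",           # placeholder
    slave_database_url="sqlite:///prompts_ro.db",  # placeholder
    model_name="your-model-name",                  # placeholder
)
mcp.run(transport="streamable-http")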
File without changes
@@ -0,0 +1,56 @@
+
+
+from fastapi import APIRouter
+from pro_craft.prompt_helper import IntellectType, Intel
+from pro_craft.utils import create_session
+import os
+
+def create_router(database_url: str,
+                  slave_database_url: str,
+                  model_name: str):
+    """
+    Create a FastAPI APIRouter instance containing the ProCraft routes.
+
+    Args:
+        database_url (str): Database connection string.
+        model_name (str): Model name used for the Intel instance.
+        api_key_secret (str, optional): Secret string used to validate the API key.
+            If provided, it overrides the PRO_CRAFT_API_KEY environment variable.
+            If neither is provided, the hard-coded 'your_default_secret_key' is used.
+    Returns:
+        APIRouter: The configured FastAPI APIRouter instance.
+    """
+
+    intels = Intel(
+        database_url=database_url,
+        model_name=model_name
+    )
+
+    router = APIRouter(
+        tags=["prompt"]  # Depends is used here to make sure every request is validated
+    )
+
+    @router.get("/push_order")
+    async def push_order(demand: str, prompt_id: str, action_type: str = "train"):
+        result = intels.push_action_order(
+            demand=demand,
+            prompt_id=prompt_id,
+            action_type=action_type
+        )
+        return {"message": "success", "result": result}
+
+    @router.get("/get_latest_prompt")
+    async def get_latest_prompt(prompt_id: str):
+        with create_session(intels.engine) as session:
+            result = intels.get_prompts_from_sql(
+                prompt_id=prompt_id,
+                session=session
+            )
+        return {"message": "success", "result": result}
+
+    @router.get("/sync_database")
+    async def sync_database():
+        result = intels.sync_prompt_data_to_database(slave_database_url)
+        return {"message": "success", "result": result}
+
+    return router
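A minimal sketch of mounting the new router in a FastAPI app (the module path is assumed from the RECORD entry pro_craft/server/router/prompt.py; the URL prefix and argument values are placeholders):

# Hypothetical app wiring; argument values are placeholders.
from fastapi import FastAPI
from pro_craft.server.router.prompt import create_router  # assumed module path

app = FastAPI()
app.include_router(
    create_router(
        database_url="postgresql://user:pass@localhost/prompts",      # placeholder
        slave_database_url="postgresql://user:pass@replica/prompts",  # placeholder
        model_name="your-model-name",                                 # placeholder
    ),
    prefix="/procraft",
)
# e.g. GET /procraft/get_latest_prompt?prompt_id=abc -> {"message": "success", "result": ...}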
pro_craft/utils.py CHANGED
@@ -14,6 +14,11 @@ import zlib
 from volcenginesdkarkruntime import Ark
 import os

+from contextlib import contextmanager
+from sqlalchemy.orm import declarative_base, sessionmaker
+from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine  # async core
+from contextlib import asynccontextmanager  # note: asynccontextmanager here
+

 def extract_(text: str, pattern_key = r"json", multi = False):
     pattern = r"```" + pattern_key + r"([\s\S]*?)```"
@@ -96,22 +101,6 @@ def load_inpackage_file(package_name:str, file_name:str,file_type = 'yaml'):
         return f.read()


-def super_print(s, target: str):
-    print()
-    print()
-    print("=="*21 + target + "=="*21)
-    print()
-    print("=="*50)
-    print(type(s))
-    print("=="*50)
-    print(s)
-    print("=="*50)
-    print()
-
-
-from sqlalchemy.orm import sessionmaker
-
-from contextlib import contextmanager
 @contextmanager
 def create_session(engine):
     # 5. Create the session (Session)
@@ -127,17 +116,6 @@ def create_session(engine):
     finally:
         session.close()  # close the session and release resources

-from contextlib import contextmanager
-from sqlalchemy import create_engine, Column, Integer, String, UniqueConstraint
-from sqlalchemy.orm import declarative_base, sessionmaker
-from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine  # async core
-import asyncio
-
-from contextlib import contextmanager
-
-from contextlib import asynccontextmanager  # note: asynccontextmanager here
-import asyncio
-

 @asynccontextmanager
 async def create_async_session(async_engine):
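For reference, a short sketch of the session helper kept in pro_craft/utils.py (the engine URL and the query are illustrative; only create_session comes from this module):

# Illustrative use of the create_session context manager.
from sqlalchemy import create_engine, text
from pro_craft.utils import create_session

engine = create_engine("sqlite:///example.db")  # placeholder database
with create_session(engine) as session:
    session.execute(text("SELECT 1"))  # any ORM/Core work; the helper closes the session afterwards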
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pro-craft
-Version: 0.1.12
+Version: 0.1.14
 Summary: Add your description here
 Requires-Python: >=3.12
 Description-Content-Type: text/markdown
@@ -0,0 +1,15 @@
+pro_craft/__init__.py,sha256=_jw5VKpBAJkqaW9qvjKUGgV7YVTY3HxgZj6R2D1garM,729
+pro_craft/database.py,sha256=5dV-h9dVaS6euHLCtf0gYfq2pchl2QFdb2PEM4gTEU4,8740
+pro_craft/file_manager.py,sha256=2j7lCt9L4mtvAy8_76ibTthXLwKKmVatWIB3DSvQM7U,3805
+pro_craft/log.py,sha256=x9RS_0LITN2SE8dcVaEcUFdcfr__jCYpFkIkVmqE5f0,3061
+pro_craft/utils.py,sha256=R1DFkS4dsm5dIhg8lLTgBBvItvIYyyojROdh-ykqiYk,5250
+pro_craft/code_helper/coder.py,sha256=NXglF1KiPtGe4HZN0MZvFJ8p9Iyd5kzIt72DQGgRwXA,24715
+pro_craft/code_helper/designer.py,sha256=3gyCqrjcw61sHzDjUPKhL1LOAE8xWLLbNT8NlK2mFLc,4739
+pro_craft/server/mcp/__init__.py,sha256=4dbl-lFcm0r2tkOP04OxqiZG2jR-rqF181qi2AfU6UA,123
+pro_craft/server/mcp/weather.py,sha256=46mwcRS-6DFHyOSirgphkpb0Bmtt8Dhx-xHjitQ0FWo,1304
+pro_craft/server/router/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pro_craft/server/router/prompt.py,sha256=VbxuvXvxJ-wLEmMnk7ec3bsIF-WB_JGh7GIYxZkXyWc,1914
+pro_craft-0.1.14.dist-info/METADATA,sha256=L9SmOtG2pHoqsWo150xmnLE_2ThtdKtcFicPFcyoV1s,1800
+pro_craft-0.1.14.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+pro_craft-0.1.14.dist-info/top_level.txt,sha256=yqYDHArnYMWpeCxkmGRwlL6sJtxiOUnYylLDx9EOgFg,10
+pro_craft-0.1.14.dist-info/RECORD,,
pro_craft/evals.py DELETED
@@ -1,68 +0,0 @@
-
-import json
-
-from pro_craft.log import Log
-logger = Log.logger
-editing_log = logger.debug
-
-
-
-
-
-############evals##############
-
-# async
-class Base_Evals():
-    def __init__(self):
-        """
-        # TODO 2: automatically optimize the prompt, improve its stability, and test it
-        Use this class by subclassing and overriding it.
-        """
-        self.MIN_SUCCESS_RATE = 00.0  # the pass threshold is defined here; above this rate the run passes
-
-
-    async def _assert_eval_function(self, params):
-        # the evaluation criteria for the function are defined here
-        print(params, 'params')
-
-    async def get_success_rate(self, test_cases: list[tuple]):
-        """
-        # the data is defined here
-
-        """
-
-        successful_assertions = 0
-        total_assertions = len(test_cases)
-        result_cases = []
-
-        for i, params in enumerate(test_cases):
-            try:
-                # pass the parameters in here
-                await self._assert_eval_function(params)
-                successful_assertions += 1
-                result_cases.append({"type":"Successful","--input--":params,"evaluate_info":f"满足要求"})
-            except AssertionError as e:
-                result_cases.append({"type":"FAILED","--input--":params,"evaluate_info":f"ERROR {e}"})
-            except Exception as e:  # catch other possible errors
-                result_cases.append({"type":"FAILED","--input--":params,"evaluate_info":f"ERROR {e}"})
-
-
-        success_rate = (successful_assertions / total_assertions) * 100
-        print(f"\n--- Aggregated Results ---")
-        print(f"Total test cases: {total_assertions}")
-        print(f"Successful cases: {successful_assertions}")
-        print(f"Success Rate: {success_rate:.2f}%")
-
-        if success_rate >= self.MIN_SUCCESS_RATE:
-            return "通过", json.dumps(result_cases, ensure_ascii=False)
-        else:
-            return "未通过", json.dumps(result_cases, ensure_ascii=False)
-
-
-def global_evals():
-    pass
-
-
-
-
-
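For context, the deleted Base_Evals was meant to be subclassed (per its docstring); a hypothetical sketch of that pattern against pro-craft <= 0.1.12, with made-up test data:

# Hypothetical subclass of the removed Base_Evals, shown only to illustrate the old pattern.
import asyncio
from pro_craft.evals import Base_Evals  # only available in pro-craft <= 0.1.12

class LengthEvals(Base_Evals):
    async def _assert_eval_function(self, params):
        text, max_len = params
        assert len(text) <= max_len, f"{len(text)} > {max_len}"

cases = [("short", 10), ("far too long for the limit", 10)]
verdict, report = asyncio.run(LengthEvals().get_success_rate(cases))  # returns ("通过"/"未通过", JSON report)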