sycommon-python-lib 0.1.56__py3-none-any.whl → 0.1.56b2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. sycommon/config/Config.py +3 -24
  2. sycommon/llm/embedding.py +23 -78
  3. sycommon/llm/get_llm.py +164 -24
  4. sycommon/logging/kafka_log.py +433 -187
  5. sycommon/middleware/exception.py +16 -10
  6. sycommon/middleware/timeout.py +1 -2
  7. sycommon/middleware/traceid.py +76 -81
  8. sycommon/rabbitmq/rabbitmq_client.py +242 -232
  9. sycommon/rabbitmq/rabbitmq_pool.py +218 -278
  10. sycommon/rabbitmq/rabbitmq_service.py +843 -25
  11. sycommon/services.py +96 -122
  12. sycommon/synacos/nacos_service.py +779 -63
  13. sycommon/tools/merge_headers.py +0 -20
  14. sycommon/tools/snowflake.py +153 -101
  15. {sycommon_python_lib-0.1.56.dist-info → sycommon_python_lib-0.1.56b2.dist-info}/METADATA +8 -10
  16. {sycommon_python_lib-0.1.56.dist-info → sycommon_python_lib-0.1.56b2.dist-info}/RECORD +19 -40
  17. sycommon/config/LangfuseConfig.py +0 -15
  18. sycommon/config/SentryConfig.py +0 -13
  19. sycommon/llm/llm_tokens.py +0 -119
  20. sycommon/llm/struct_token.py +0 -192
  21. sycommon/llm/sy_langfuse.py +0 -103
  22. sycommon/llm/usage_token.py +0 -117
  23. sycommon/notice/__init__.py +0 -0
  24. sycommon/notice/uvicorn_monitor.py +0 -200
  25. sycommon/rabbitmq/rabbitmq_service_client_manager.py +0 -206
  26. sycommon/rabbitmq/rabbitmq_service_connection_monitor.py +0 -73
  27. sycommon/rabbitmq/rabbitmq_service_consumer_manager.py +0 -285
  28. sycommon/rabbitmq/rabbitmq_service_core.py +0 -117
  29. sycommon/rabbitmq/rabbitmq_service_producer_manager.py +0 -238
  30. sycommon/sentry/__init__.py +0 -0
  31. sycommon/sentry/sy_sentry.py +0 -35
  32. sycommon/synacos/nacos_client_base.py +0 -119
  33. sycommon/synacos/nacos_config_manager.py +0 -107
  34. sycommon/synacos/nacos_heartbeat_manager.py +0 -144
  35. sycommon/synacos/nacos_service_discovery.py +0 -157
  36. sycommon/synacos/nacos_service_registration.py +0 -270
  37. sycommon/tools/env.py +0 -62
  38. {sycommon_python_lib-0.1.56.dist-info → sycommon_python_lib-0.1.56b2.dist-info}/WHEEL +0 -0
  39. {sycommon_python_lib-0.1.56.dist-info → sycommon_python_lib-0.1.56b2.dist-info}/entry_points.txt +0 -0
  40. {sycommon_python_lib-0.1.56.dist-info → sycommon_python_lib-0.1.56b2.dist-info}/top_level.txt +0 -0
sycommon/config/Config.py CHANGED
@@ -15,13 +15,13 @@ class Config(metaclass=SingletonMeta):
         with open(config_file, 'r', encoding='utf-8') as f:
             self.config = yaml.safe_load(f)
         self.MaxBytes = self.config.get('MaxBytes', 209715200)
-        self.Timeout = self.config.get('Timeout', 600000)
+        self.Timeout = self.config.get('Timeout', 300000)
         self.MaxRetries = self.config.get('MaxRetries', 3)
+        self.OCR = self.config.get('OCR', None)
+        self.INVOICE_OCR = self.config.get('INVOICE_OCR', None)
         self.llm_configs = []
         self.embedding_configs = []
         self.reranker_configs = []
-        self.sentry_configs = []
-        self.langfuse_configs = []
         self._process_config()
 
     def get_llm_config(self, model_name):
@@ -42,18 +42,6 @@ class Config(metaclass=SingletonMeta):
             return llm
         raise ValueError(f"No configuration found for model: {model_name}")
 
-    def get_sentry_config(self, name):
-        for sentry in self.sentry_configs:
-            if sentry.get('name') == name:
-                return sentry
-        raise ValueError(f"No configuration found for server: {name}")
-
-    def get_langfuse_config(self, name):
-        for langfuse in self.langfuse_configs:
-            if langfuse.get('name') == name:
-                return langfuse
-        raise ValueError(f"No configuration found for server: {name}")
-
     def _process_config(self):
         llm_config_list = self.config.get('LLMConfig', [])
         for llm_config in llm_config_list:
@@ -83,15 +71,6 @@ class Config(metaclass=SingletonMeta):
             except ValueError as e:
                 print(f"Invalid LLM configuration: {e}")
 
-        sentry_config_list = self.config.get('SentryConfig', [])
-        for sentry_config in sentry_config_list:
-            try:
-                from sycommon.config.SentryConfig import SentryConfig
-                validated_config = SentryConfig(**sentry_config)
-                self.sentry_configs.append(validated_config.model_dump())
-            except ValueError as e:
-                print(f"Invalid Sentry configuration: {e}")
-
     def set_attr(self, share_configs: dict):
         self.config = {**self.config, **
                        share_configs.get('llm', {}), **share_configs}
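
For orientation, here is a minimal sketch (Python) of the mapping Config now reads via yaml.safe_load. The top-level keys come from the diff above; the endpoint values are hypothetical, and the fields inside the LLMConfig entry are inferred from how get_llm.py (below) uses provider/model/baseUrl, with the "name" lookup key being an assumption.

    # Hypothetical config dict, as yaml.safe_load might return it.
    sample_config = {
        "MaxBytes": 209715200,   # default shown in the diff
        "Timeout": 300000,       # new default (previously 600000)
        "MaxRetries": 3,
        "OCR": "http://ocr.internal:8000",                  # hypothetical endpoint
        "INVOICE_OCR": "http://invoice-ocr.internal:8000",  # hypothetical endpoint
        "LLMConfig": [
            {
                "name": "Qwen2.5-72B",                # assumed lookup key
                "provider": "openai",                 # hypothetical provider
                "model": "Qwen2.5-72B",
                "baseUrl": "http://llm.internal/v1",  # hypothetical URL
            }
        ],
    }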
sycommon/llm/embedding.py CHANGED
@@ -25,21 +25,15 @@ class Embedding(metaclass=SingletonMeta):
 
         # Concurrency semaphore
         self.semaphore = asyncio.Semaphore(self.max_concurrency)
-        # Global default timeout: never time out (None)
-        self.default_timeout = aiohttp.ClientTimeout(total=None)
 
     async def _get_embeddings_http_async(
         self,
         input: Union[str, List[str]],
         encoding_format: str = None,
         model: str = None,
-        timeout: aiohttp.ClientTimeout = None,
         **kwargs
     ):
         async with self.semaphore:
-            # Prefer the passed-in timeout; otherwise use the global default
-            request_timeout = timeout or self.default_timeout
-
             # Prefer the passed-in model name; otherwise use the default
             target_model = model or self.default_embedding_model
             target_base_url = EmbeddingConfig.from_config(target_model).baseUrl
@@ -52,23 +46,14 @@
             }
             request_body.update(kwargs)
 
-            try:
-                async with aiohttp.ClientSession(timeout=request_timeout) as session:
-                    async with session.post(url, json=request_body) as response:
-                        if response.status != 200:
-                            error_detail = await response.text()
-                            SYLogger.error(
-                                f"Embedding request failed (model: {target_model}): {error_detail}")
-                            return None
-                        return await response.json()
-            except asyncio.TimeoutError:
-                SYLogger.error(
-                    f"Embedding request timeout (model: {target_model})")
-                return None
-            except Exception as e:
-                SYLogger.error(
-                    f"Embedding request unexpected error (model: {target_model}): {str(e)}")
-                return None
+            async with aiohttp.ClientSession() as session:
+                async with session.post(url, json=request_body) as response:
+                    if response.status != 200:
+                        error_detail = await response.text()
+                        SYLogger.error(
+                            f"Embedding request failed (model: {target_model}): {error_detail}")
+                        return None
+                    return await response.json()
 
     async def _get_reranker_http_async(
         self,
@@ -79,13 +64,9 @@
         max_chunks_per_doc: Optional[int] = None,
         return_documents: Optional[bool] = True,
         return_len: Optional[bool] = True,
-        timeout: aiohttp.ClientTimeout = None,
         **kwargs
     ):
         async with self.semaphore:
-            # Prefer the passed-in timeout; otherwise use the global default
-            request_timeout = timeout or self.default_timeout
-
             # Prefer the passed-in model name; otherwise use the default
             target_model = model or self.default_reranker_model
             target_base_url = RerankerConfig.from_config(target_model).baseUrl
@@ -103,29 +84,19 @@
             }
             request_body.update(kwargs)
 
-            try:
-                async with aiohttp.ClientSession(timeout=request_timeout) as session:
-                    async with session.post(url, json=request_body) as response:
-                        if response.status != 200:
-                            error_detail = await response.text()
-                            SYLogger.error(
-                                f"Rerank request failed (model: {target_model}): {error_detail}")
-                            return None
-                        return await response.json()
-            except asyncio.TimeoutError:
-                SYLogger.error(
-                    f"Rerank request timeout (model: {target_model})")
-                return None
-            except Exception as e:
-                SYLogger.error(
-                    f"Rerank request unexpected error (model: {target_model}): {str(e)}")
-                return None
+            async with aiohttp.ClientSession() as session:
+                async with session.post(url, json=request_body) as response:
+                    if response.status != 200:
+                        error_detail = await response.text()
+                        SYLogger.error(
+                            f"Rerank request failed (model: {target_model}): {error_detail}")
+                        return None
+                    return await response.json()
 
     async def get_embeddings(
         self,
         corpus: List[str],
-        model: str = None,
-        timeout: Optional[Union[int, float]] = None
+        model: str = None
     ):
         """
         Get embedding vectors for a corpus; results keep the same order as the input corpus
@@ -133,24 +104,12 @@
         Args:
             corpus: list of texts to generate embeddings for
             model: optional; name of the embedding model to use, defaults to bge-large-zh-v1.5
-            timeout: optional timeout in seconds:
-                - int/float: total timeout in seconds
-                - omitted/None: use the default never-time-out configuration
         """
-        request_timeout = None
-        if timeout is not None:
-            if isinstance(timeout, (int, float)):
-                request_timeout = aiohttp.ClientTimeout(total=timeout)
-            else:
-                SYLogger.warning(
-                    f"Invalid timeout type: {type(timeout)}, must be int/float, use default timeout")
-
         SYLogger.info(
-            f"Requesting embeddings for corpus: {corpus} (model: {model or self.default_embedding_model}, max_concurrency: {self.max_concurrency}, timeout: {timeout or 'None'})")
-
-        # Pass the model name and timeout config to each async task
+            f"Requesting embeddings for corpus: {corpus} (model: {model or self.default_embedding_model}, max_concurrency: {self.max_concurrency})")
+        # Pass the model name to each async task
         tasks = [self._get_embeddings_http_async(
-            text, model=model, timeout=request_timeout) for text in corpus]
+            text, model=model) for text in corpus]
         results = await asyncio.gather(*tasks)
 
         vectors = []
@@ -172,8 +131,7 @@
         self,
         top_results: List[str],
         query: str,
-        model: str = None,
-        timeout: Optional[Union[int, float]] = None
+        model: str = None
     ):
         """
         Rerank search results
@@ -182,23 +140,10 @@
             top_results: list of texts to rerank
             query: the query used as the ranking reference
             model: optional; name of the reranker model to use, defaults to bge-reranker-large
-            timeout: optional timeout in seconds:
-                - int/float: total timeout in seconds
-                - omitted/None: use the default never-time-out configuration
         """
-        request_timeout = None
-        if timeout is not None:
-            if isinstance(timeout, (int, float)):
-                request_timeout = aiohttp.ClientTimeout(total=timeout)
-            else:
-                SYLogger.warning(
-                    f"Invalid timeout type: {type(timeout)}, must be int/float, use default timeout")
-
         SYLogger.info(
-            f"Requesting reranker for top_results: {top_results} (model: {model or self.default_reranker_model}, max_concurrency: {self.max_concurrency}, timeout: {timeout or 'None'})")
-
-        data = await self._get_reranker_http_async(
-            top_results, query, model=model, timeout=request_timeout)
+            f"Requesting reranker for top_results: {top_results} (model: {model or self.default_reranker_model}, max_concurrency: {self.max_concurrency})")
+        data = await self._get_reranker_http_async(top_results, query, model=model)
         SYLogger.info(
             f"Reranker for top_results: {top_results} completed (model: {model or self.default_reranker_model})")
         return data
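
With the timeout parameters removed, callers now rely on aiohttp's default client timeout. A minimal usage sketch (Python) under that assumption: get_embeddings and the default model come from the diff, while the no-argument constructor and the return value (assumed to be the collected vectors; the tail of the method falls outside the visible hunks) are assumptions.

    import asyncio

    from sycommon.llm.embedding import Embedding  # module path from this diff

    async def main():
        emb = Embedding()  # SingletonMeta: repeated calls return one instance
        # One HTTP task per text, bounded by the instance semaphore; the model
        # defaults to bge-large-zh-v1.5 per the docstring.
        vectors = await emb.get_embeddings(["hello world", "hello again"])
        print(len(vectors))

    asyncio.run(main())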
sycommon/llm/get_llm.py CHANGED
@@ -1,37 +1,177 @@
+from typing import Dict, Type, List, Union, Optional, Callable
+
 from sycommon.llm.llm_logger import LLMLogger
+from langchain_core.language_models import BaseChatModel
+from langchain_core.runnables import Runnable, RunnableLambda
+from langchain_core.output_parsers import PydanticOutputParser
+from langchain_core.messages import BaseMessage, SystemMessage, HumanMessage
 from langchain.chat_models import init_chat_model
+from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
+from pydantic import BaseModel, ValidationError
 from sycommon.config.LLMConfig import LLMConfig
-from sycommon.llm.sy_langfuse import LangfuseInitializer
-from sycommon.llm.usage_token import LLMWithAutoTokenUsage
 
 
-def get_llm(
-    model: str = None,
-    streaming: bool = False
-) -> LLMWithAutoTokenUsage:
+def get_llm(model: str = None, streaming: bool = False) -> BaseChatModel:
     if not model:
         model = "Qwen2.5-72B"
-
+    # model = "SyMid"
     llmConfig = LLMConfig.from_config(model)
-    if not llmConfig:
-        raise Exception(f"Invalid model configuration: {model}")
+    llm = None
+    if llmConfig:
+        llm = init_chat_model(
+            model_provider=llmConfig.provider,
+            model=llmConfig.model,
+            base_url=llmConfig.baseUrl,
+            api_key="-",
+            temperature=0.1,
+            streaming=streaming,
+        )
+    else:
+        raise Exception("Invalid model")
+
+    # Dynamically attach a with_structured_output method to the LLM; the official
+    # with_structured_output can occasionally make Qwen2.5 hang without returning
+    # data, since 2.5 has poor function-call support
+    def with_structured_output(
+        self: BaseChatModel,
+        output_model: Type[BaseModel],
+        max_retries: int = 3,
+        is_extract: bool = False,
+        override_prompt: ChatPromptTemplate = None,
+        # List of custom processing functions (each takes a str, returns a str)
+        custom_processors: Optional[List[Callable[[str], str]]] = None,
+        # Custom parsing function (takes a str, returns a BaseModel)
+        custom_parser: Optional[Callable[[str], BaseModel]] = None
+    ) -> Runnable[List[BaseMessage], BaseModel]:
+        parser = PydanticOutputParser(pydantic_object=output_model)
+
+        accuracy_instructions = """
+        Extraction accuracy for a field value (between 0 and 1), scoring rules:
+        1.0 (fully accurate): taken directly from the source text with no processing, format identical to the source
+        0.9 (lightly processed): the source is clear, but format normalization or removal of redundant information is needed (original values unchanged)
+        0.8 (limited inference): the value must be derived from context or simple calculation, but still has a clear basis
+        below 0.8 (unreliable): the value requires heavy guessing, is ambiguous, or has an unclear source; handling: ignore it and set it to None
+        """
+
+        if is_extract:
+            # In extraction mode, use a fixed extraction-specific prompt
+            prompt = ChatPromptTemplate.from_messages([
+                MessagesPlaceholder(variable_name="messages"),
+                HumanMessage(content=f"""
+                Please extract the information and follow these rules:
+                1. Accuracy requirements: {accuracy_instructions.strip()}
+                2. Output format: {parser.get_format_instructions()}
+                """)
+            ])
+        else:
+            if override_prompt:
+                prompt = override_prompt
+            else:
+                prompt = ChatPromptTemplate.from_messages([
+                    MessagesPlaceholder(variable_name="messages"),
+                    HumanMessage(content=f"""
+                    Output format: {parser.get_format_instructions()}
+                    """)
+                ])
+
+        # ========== Basic processing functions ==========
+        def extract_response_content(response: BaseMessage) -> str:
+            """Extract the text content from the response"""
+            try:
+                return response.content
+            except Exception as e:
+                raise ValueError(f"Failed to extract response content: {str(e)}") from e
+
+        def strip_code_block_markers(content: str) -> str:
+            """Remove JSON code-block markers (```json/```)"""
+            try:
+                return content.strip("```json").strip("```").strip()
+            except Exception as e:
+                raise ValueError(
+                    f"Failed to remove code-block markers (content: {str(content)[:100]}): {str(e)}") from e
+
+        def normalize_in_json(content: str) -> str:
+            """Replace None with null to keep the JSON valid"""
+            try:
+                cleaned = content.replace("None", "null")
+                cleaned = cleaned.replace("none", "null")
+                cleaned = cleaned.replace("NONE", "null")
+                cleaned = cleaned.replace("''", '""')
+                return cleaned
+            except Exception as e:
+                raise ValueError(
+                    f"Failed to replace None with null (content: {str(content)[:100]}): {str(e)}") from e
+
+        def default_parse_to_pydantic(content: str) -> BaseModel:
+            """Default parser: parse the processed text into a Pydantic model"""
+            try:
+                return parser.parse(content)
+            except (ValidationError, ValueError) as e:
+                raise type(e)(f"Parsing failed (raw content: {content[:200]}): {str(e)}") from e
+
+        # ========== Build the processing chain ==========
+        # Base chain: prompt → LLM → extract response content
+        base_chain = (
+            prompt
+            | self
+            | RunnableLambda(extract_response_content)
+        )
+
+        # Processing chain: prefer custom processors, otherwise use the defaults
+        if custom_processors:
+            # Custom processors → convert to a list of RunnableLambda
+            process_runnables = [RunnableLambda(
+                func) for func in custom_processors]
+        else:
+            # Default processors: strip code-block markers → normalize JSON nulls
+            process_runnables = [
+                RunnableLambda(strip_code_block_markers),
+                RunnableLambda(normalize_in_json)
+            ]
+
+        # Chain the processors together
+        process_chain = base_chain
+        for runnable in process_runnables:
+            process_chain = process_chain | runnable
+
+        # Parsing: prefer the custom parser, otherwise use the default
+        parse_func = custom_parser if custom_parser else default_parse_to_pydantic
+        parse_chain = process_chain | RunnableLambda(parse_func)
+
+        retry_chain = parse_chain.with_retry(
+            retry_if_exception_type=(ValidationError, ValueError),
+            stop_after_attempt=max_retries,
+            wait_exponential_jitter=True,
+            exponential_jitter_params={
+                "initial": 0.1,   # initial wait (seconds)
+                "max": 3.0,       # maximum wait (seconds)
+                "exp_base": 2.0,  # exponent base (default 2)
+                "jitter": 1.0     # random jitter (default 1)
+            }
+        )
 
-    # Initialize Langfuse
-    langfuse_callbacks, langfuse = LangfuseInitializer.get()
+        class StructuredRunnable(Runnable[Union[List[BaseMessage], BaseMessage, str, Dict[str, str]], BaseModel]):
+            def _adapt_input(self, input: Union[List[BaseMessage], BaseMessage, str, Dict[str, str]]) -> List[BaseMessage]:
+                """Normalize the supported input formats into List[BaseMessage]"""
+                if isinstance(input, list) and all(isinstance(x, BaseMessage) for x in input):
+                    return input
+                elif isinstance(input, BaseMessage):
+                    return [input]
+                elif isinstance(input, str):
+                    return [HumanMessage(content=input)]
+                elif isinstance(input, dict) and "input" in input:
+                    return [HumanMessage(content=str(input["input"]))]
+                else:
+                    raise ValueError(
+                        "Unsupported input format; use a message list, a single message, plain text, or {'input': 'text'}")
 
-    callbacks = [LLMLogger()] + langfuse_callbacks
+            def invoke(self, input: Union[List[BaseMessage], BaseMessage, str, Dict[str, str]], config={"callbacks": [LLMLogger()]}):
+                adapted_input = self._adapt_input(input)
+                return retry_chain.invoke({"messages": adapted_input}, config=config)
 
-    llm = init_chat_model(
-        model_provider=llmConfig.provider,
-        model=llmConfig.model,
-        base_url=llmConfig.baseUrl,
-        api_key="-",
-        temperature=0.1,
-        streaming=streaming,
-        callbacks=callbacks
-    )
+            async def ainvoke(self, input: Union[List[BaseMessage], BaseMessage, str, Dict[str, str]], config={"callbacks": [LLMLogger()]}):
+                adapted_input = self._adapt_input(input)
+                return await retry_chain.ainvoke({"messages": adapted_input}, config=config)
 
-    if llm is None:
-        raise Exception(f"Failed to initialize the raw LLM instance: {model}")
+        return StructuredRunnable()
 
-    return LLMWithAutoTokenUsage(llm, langfuse)
+    llm.__class__.with_structured_output = with_structured_output
+    return llm
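
A minimal sketch (Python) of how the patched method is meant to be called: get_llm, with_structured_output, and the accepted input shapes come from the diff above, while the Invoice schema and the input text are invented for illustration.

    from typing import Optional

    from pydantic import BaseModel

    from sycommon.llm.get_llm import get_llm  # module path from this diff

    class Invoice(BaseModel):
        """Hypothetical output schema."""
        vendor: Optional[str] = None
        total: Optional[float] = None

    llm = get_llm()  # falls back to "Qwen2.5-72B" when no model is given
    # is_extract=True selects the fixed extraction prompt with the accuracy rules
    structured = llm.with_structured_output(Invoice, is_extract=True)
    # _adapt_input accepts a plain string (wrapped in a HumanMessage), a single
    # message, a message list, or {"input": "..."}
    result = structured.invoke("Invoice #42 from ACME Corp, total due: 1234.50")
    print(result)  # an Invoice instance produced by PydanticOutputParser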