infomankit 0.3.23__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- infoman/__init__.py +1 -0
- infoman/cli/README.md +378 -0
- infoman/cli/__init__.py +7 -0
- infoman/cli/commands/__init__.py +3 -0
- infoman/cli/commands/init.py +312 -0
- infoman/cli/scaffold.py +634 -0
- infoman/cli/templates/Makefile.template +132 -0
- infoman/cli/templates/app/__init__.py.template +3 -0
- infoman/cli/templates/app/app.py.template +4 -0
- infoman/cli/templates/app/models_base.py.template +18 -0
- infoman/cli/templates/app/models_entity_init.py.template +11 -0
- infoman/cli/templates/app/models_schemas_init.py.template +11 -0
- infoman/cli/templates/app/repository_init.py.template +11 -0
- infoman/cli/templates/app/routers_init.py.template +15 -0
- infoman/cli/templates/app/services_init.py.template +11 -0
- infoman/cli/templates/app/static_index.html.template +39 -0
- infoman/cli/templates/app/static_main.js.template +31 -0
- infoman/cli/templates/app/static_style.css.template +111 -0
- infoman/cli/templates/app/utils_init.py.template +11 -0
- infoman/cli/templates/config/.env.dev.template +43 -0
- infoman/cli/templates/config/.env.prod.template +43 -0
- infoman/cli/templates/config/README.md.template +28 -0
- infoman/cli/templates/docker/.dockerignore.template +60 -0
- infoman/cli/templates/docker/Dockerfile.template +47 -0
- infoman/cli/templates/docker/README.md.template +240 -0
- infoman/cli/templates/docker/docker-compose.yml.template +81 -0
- infoman/cli/templates/docker/mysql_custom.cnf.template +42 -0
- infoman/cli/templates/docker/mysql_init.sql.template +15 -0
- infoman/cli/templates/project/.env.example.template +1 -0
- infoman/cli/templates/project/.gitignore.template +60 -0
- infoman/cli/templates/project/Makefile.template +38 -0
- infoman/cli/templates/project/README.md.template +137 -0
- infoman/cli/templates/project/deploy.sh.template +97 -0
- infoman/cli/templates/project/main.py.template +10 -0
- infoman/cli/templates/project/manage.sh.template +97 -0
- infoman/cli/templates/project/pyproject.toml.template +47 -0
- infoman/cli/templates/project/service.sh.template +203 -0
- infoman/config/__init__.py +25 -0
- infoman/config/base.py +67 -0
- infoman/config/db_cache.py +237 -0
- infoman/config/db_relation.py +181 -0
- infoman/config/db_vector.py +39 -0
- infoman/config/jwt.py +16 -0
- infoman/config/llm.py +16 -0
- infoman/config/log.py +627 -0
- infoman/config/mq.py +26 -0
- infoman/config/settings.py +65 -0
- infoman/llm/__init__.py +0 -0
- infoman/llm/llm.py +297 -0
- infoman/logger/__init__.py +57 -0
- infoman/logger/context.py +191 -0
- infoman/logger/core.py +358 -0
- infoman/logger/filters.py +157 -0
- infoman/logger/formatters.py +138 -0
- infoman/logger/handlers.py +276 -0
- infoman/logger/metrics.py +160 -0
- infoman/performance/README.md +583 -0
- infoman/performance/__init__.py +19 -0
- infoman/performance/cli.py +215 -0
- infoman/performance/config.py +166 -0
- infoman/performance/reporter.py +519 -0
- infoman/performance/runner.py +303 -0
- infoman/performance/standards.py +222 -0
- infoman/service/__init__.py +8 -0
- infoman/service/app.py +67 -0
- infoman/service/core/__init__.py +0 -0
- infoman/service/core/auth.py +105 -0
- infoman/service/core/lifespan.py +132 -0
- infoman/service/core/monitor.py +57 -0
- infoman/service/core/response.py +37 -0
- infoman/service/exception/__init__.py +7 -0
- infoman/service/exception/error.py +274 -0
- infoman/service/exception/exception.py +25 -0
- infoman/service/exception/handler.py +238 -0
- infoman/service/infrastructure/__init__.py +8 -0
- infoman/service/infrastructure/base.py +212 -0
- infoman/service/infrastructure/db_cache/__init__.py +8 -0
- infoman/service/infrastructure/db_cache/manager.py +194 -0
- infoman/service/infrastructure/db_relation/__init__.py +41 -0
- infoman/service/infrastructure/db_relation/manager.py +300 -0
- infoman/service/infrastructure/db_relation/manager_pro.py +408 -0
- infoman/service/infrastructure/db_relation/mysql.py +52 -0
- infoman/service/infrastructure/db_relation/pgsql.py +54 -0
- infoman/service/infrastructure/db_relation/sqllite.py +25 -0
- infoman/service/infrastructure/db_vector/__init__.py +40 -0
- infoman/service/infrastructure/db_vector/manager.py +201 -0
- infoman/service/infrastructure/db_vector/qdrant.py +322 -0
- infoman/service/infrastructure/mq/__init__.py +15 -0
- infoman/service/infrastructure/mq/manager.py +178 -0
- infoman/service/infrastructure/mq/nats/__init__.py +0 -0
- infoman/service/infrastructure/mq/nats/nats_client.py +57 -0
- infoman/service/infrastructure/mq/nats/nats_event_router.py +25 -0
- infoman/service/launch.py +284 -0
- infoman/service/middleware/__init__.py +7 -0
- infoman/service/middleware/base.py +41 -0
- infoman/service/middleware/logging.py +51 -0
- infoman/service/middleware/rate_limit.py +301 -0
- infoman/service/middleware/request_id.py +21 -0
- infoman/service/middleware/white_list.py +24 -0
- infoman/service/models/__init__.py +8 -0
- infoman/service/models/base.py +441 -0
- infoman/service/models/type/embed.py +70 -0
- infoman/service/routers/__init__.py +18 -0
- infoman/service/routers/health_router.py +311 -0
- infoman/service/routers/monitor_router.py +44 -0
- infoman/service/utils/__init__.py +8 -0
- infoman/service/utils/cache/__init__.py +0 -0
- infoman/service/utils/cache/cache.py +192 -0
- infoman/service/utils/module_loader.py +10 -0
- infoman/service/utils/parse.py +10 -0
- infoman/service/utils/resolver/__init__.py +8 -0
- infoman/service/utils/resolver/base.py +47 -0
- infoman/service/utils/resolver/resp.py +102 -0
- infoman/service/vector/__init__.py +20 -0
- infoman/service/vector/base.py +56 -0
- infoman/service/vector/qdrant.py +125 -0
- infoman/service/vector/service.py +67 -0
- infoman/utils/__init__.py +2 -0
- infoman/utils/decorators/__init__.py +8 -0
- infoman/utils/decorators/cache.py +137 -0
- infoman/utils/decorators/retry.py +99 -0
- infoman/utils/decorators/safe_execute.py +99 -0
- infoman/utils/decorators/timing.py +99 -0
- infoman/utils/encryption/__init__.py +8 -0
- infoman/utils/encryption/aes.py +66 -0
- infoman/utils/encryption/ecc.py +108 -0
- infoman/utils/encryption/rsa.py +112 -0
- infoman/utils/file/__init__.py +0 -0
- infoman/utils/file/handler.py +22 -0
- infoman/utils/hash/__init__.py +0 -0
- infoman/utils/hash/hash.py +61 -0
- infoman/utils/http/__init__.py +8 -0
- infoman/utils/http/client.py +62 -0
- infoman/utils/http/info.py +94 -0
- infoman/utils/http/result.py +19 -0
- infoman/utils/notification/__init__.py +8 -0
- infoman/utils/notification/feishu.py +35 -0
- infoman/utils/text/__init__.py +8 -0
- infoman/utils/text/extractor.py +111 -0
- infomankit-0.3.23.dist-info/METADATA +632 -0
- infomankit-0.3.23.dist-info/RECORD +143 -0
- infomankit-0.3.23.dist-info/WHEEL +4 -0
- infomankit-0.3.23.dist-info/entry_points.txt +5 -0
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
# !/usr/bin/env python
|
|
2
|
+
# -*-coding:utf-8 -*-
|
|
3
|
+
|
|
4
|
+
"""
|
|
5
|
+
# Time :2025/12/22 21:38
|
|
6
|
+
# Author :Maxwell
|
|
7
|
+
# Description:
|
|
8
|
+
"""
|
|
9
|
+
import os
|
|
10
|
+
from dotenv import load_dotenv
|
|
11
|
+
from functools import lru_cache
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
from .base import BaseConfig
|
|
14
|
+
from .db_relation import DatabaseConfig
|
|
15
|
+
from .db_cache import RedisConfig
|
|
16
|
+
from .db_vector import VectorDBConfig
|
|
17
|
+
from .mq import MessageQueueConfig
|
|
18
|
+
from .jwt import JWTConfig
|
|
19
|
+
from .llm import LLMConfig
|
|
20
|
+
from .log import LogConfig
|
|
21
|
+
from pydantic_settings import SettingsConfigDict
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
# Mapping from the ENV environment variable to the dotenv file that
# configures that deployment environment.
ENV_FILE_MAP = {
    'dev': 'config/.env.dev',
    'test': 'config/.env.test',
    'prod': 'config/.env.prod',
}


# Load a top-level .env first (if present) so ENV itself can come from a
# dotenv file as well as from the process environment.
load_dotenv()
# Default to the development environment when ENV is unset.
ENV = os.getenv('ENV', 'dev')

# Fail fast at import time on an unknown environment name.
if ENV not in ENV_FILE_MAP:
    raise ValueError(f"无效的环境变量 ENV={ENV},有效值: {list(ENV_FILE_MAP.keys())}")

# Resolve and validate the environment-specific dotenv file. NOTE(review):
# the path is relative to the process working directory at import time —
# confirm callers always start from the project root.
ENV_FILE = ENV_FILE_MAP[ENV]
if not Path(ENV_FILE).exists():
    raise FileNotFoundError(f"配置文件不存在: {ENV_FILE}")
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
class Settings(
    BaseConfig,
    DatabaseConfig,
    RedisConfig,
    VectorDBConfig,
    MessageQueueConfig,
    JWTConfig,
    LLMConfig,
    LogConfig
):
    """Aggregated application settings.

    Combines every domain-specific configuration mixin (base, relational
    DB, Redis cache, vector DB, message queue, JWT, LLM, logging) into one
    pydantic-settings model populated from the environment-specific dotenv
    file selected above via ENV.
    """

    model_config = SettingsConfigDict(
        env_file=ENV_FILE,            # dotenv file chosen from ENV at import time
        env_file_encoding="utf-8",
        case_sensitive=True,          # env var names must match field names exactly
        extra="ignore",               # silently drop unknown keys in the env file
    )
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
@lru_cache(maxsize=1)
def get_settings() -> Settings:
    """Return the process-wide Settings singleton (constructed once, cached)."""
    return Settings()


# Eagerly instantiate so importers can simply `from infoman.config import settings`.
settings = get_settings()
|
infoman/llm/__init__.py
ADDED
|
File without changes
|
infoman/llm/llm.py
ADDED
|
@@ -0,0 +1,297 @@
|
|
|
1
|
+
import time
|
|
2
|
+
from dataclasses import dataclass
|
|
3
|
+
from typing import Dict, List, AsyncGenerator, Union, Optional
|
|
4
|
+
from litellm import acompletion
|
|
5
|
+
from infoman.config import settings as config
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
@dataclass
class ChatResponse:
    """Result of a single non-streaming LLM completion call."""

    # Generated text ("" when the call failed).
    content: str
    # Prompt tokens consumed (0 when usage info was unavailable or on error).
    input_token_count: int
    # Completion tokens produced (0 when usage info was unavailable or on error).
    output_token_count: int
    # Wall-clock duration of the call in milliseconds.
    elapsed_time_ms: int = 0
    # Error message when the call raised; None on success.
    error: Optional[str] = None
    # Model identifier actually used (may include a proxy prefix).
    model: Optional[str] = None
    # Provider-reported finish reason, if any.
    finish_reason: Optional[str] = None

    @property
    def total_tokens(self) -> int:
        """Total tokens consumed (prompt + completion)."""
        return self.input_token_count + self.output_token_count

    @property
    def success(self) -> bool:
        """True when the completion finished without raising."""
        return self.error is None
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class ChatStreamResponse:
    """Wrapper around a streaming completion.

    Iterate asynchronously to receive text deltas as they arrive; all text
    seen so far accumulates in ``full_content``.
    """

    def __init__(self, content_generator: AsyncGenerator[str, None], model: str = None):
        """Wrap *content_generator*, an async stream of provider chunks."""
        self.content_generator = content_generator
        self.input_token_count = 0   # reserved; not populated while streaming
        self.output_token_count = 0  # reserved; not populated while streaming
        self.full_content = ""       # everything yielded so far
        self.model = model
        self.finish_reason = None    # reserved; not populated while streaming

    async def __aiter__(self):
        """Yield each non-empty text delta while accumulating it."""
        async for chunk in self.content_generator:
            piece = getattr(chunk.choices[0].delta, "content", None)
            if not piece:
                # Skip chunks with a missing or empty delta.
                continue
            self.full_content = self.full_content + piece
            yield piece

    async def collect(self) -> str:
        """Drain the whole stream and return the concatenated text."""
        async for _piece in self:
            pass
        return self.full_content
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
class LLM:
    """Thin static facade over ``litellm.acompletion``.

    ``ask``/``chat`` return a :class:`ChatResponse` and never raise —
    failures are reported via ``ChatResponse.error``. ``stream`` returns a
    :class:`ChatStreamResponse`; the ``quick_*`` helpers return plain text.
    """

    @staticmethod
    def _extract_token_usage(response) -> tuple:
        """Return ``(prompt_tokens, completion_tokens)`` from a provider
        response, or ``(0, 0)`` when no usage information is attached."""
        if hasattr(response, "usage") and response.usage:
            input_tokens = getattr(response.usage, "prompt_tokens", 0)
            output_tokens = getattr(response.usage, "completion_tokens", 0)
        else:
            input_tokens = output_tokens = 0
        return input_tokens, output_tokens

    @staticmethod
    def _prepare_messages(
        messages: Union[str, List[Dict]], system_prompt: Optional[str] = None
    ) -> List[Dict]:
        """Normalize *messages* into a chat message list.

        A bare string becomes a single user message, preceded by a system
        message when *system_prompt* is given. A message list is returned
        unchanged, except that *system_prompt* is prepended when the list
        does not already contain a system message.
        """
        if isinstance(messages, str):
            msg_list = []
            if system_prompt:
                msg_list.append({"role": "system", "content": system_prompt})
            msg_list.append({"role": "user", "content": messages})
            return msg_list

        if system_prompt and not any(msg.get("role") == "system" for msg in messages):
            return [{"role": "system", "content": system_prompt}] + messages

        return messages

    @staticmethod
    async def _execute_completion(
        model: str, messages: List[Dict], **kwargs
    ) -> "ChatResponse":
        """Run one completion and wrap the outcome in a ChatResponse.

        Exceptions are captured into ``ChatResponse.error`` rather than
        propagated; ``elapsed_time_ms`` covers the full round trip.
        """
        start_time = time.time()

        try:
            # Route bare model names through the configured proxy provider.
            # (Idiomatic membership test instead of model.__contains__("/").)
            if "/" not in model:
                model = f"{config.LLM_PROXY}/{model}"
            response = await acompletion(model=model, messages=messages, **kwargs)

            elapsed_ms = int((time.time() - start_time) * 1000)
            input_tokens, output_tokens = LLM._extract_token_usage(response)

            return ChatResponse(
                content=response.choices[0].message.content,
                input_token_count=input_tokens,
                output_token_count=output_tokens,
                elapsed_time_ms=elapsed_ms,
                model=model,
                finish_reason=getattr(response.choices[0], "finish_reason", None),
            )

        except Exception as e:
            elapsed_ms = int((time.time() - start_time) * 1000)
            return ChatResponse(
                content="",
                input_token_count=0,
                output_token_count=0,
                elapsed_time_ms=elapsed_ms,
                error=str(e),
                model=model,
            )

    @staticmethod
    async def ask(
        model: str, prompt: str, system_prompt: Optional[str] = None, **kwargs
    ) -> "ChatResponse":
        """Single-turn completion for a plain-text *prompt*."""
        messages = LLM._prepare_messages(prompt, system_prompt)
        return await LLM._execute_completion(model, messages, **kwargs)

    @staticmethod
    async def chat(
        model: str,
        messages: Union[str, List[Dict]],
        system_prompt: Optional[str] = None,
        **kwargs,
    ) -> "ChatResponse":
        """Multi-turn completion; *messages* may be a string or message list."""
        messages = LLM._prepare_messages(messages, system_prompt)
        return await LLM._execute_completion(model, messages, **kwargs)

    @staticmethod
    async def stream(
        model: str,
        messages: Union[str, List[Dict]],
        system_prompt: Optional[str] = None,
        **kwargs,
    ) -> "ChatStreamResponse":
        """Start a streaming completion wrapped in a ChatStreamResponse.

        NOTE(review): unlike ``_execute_completion`` this does not prepend
        ``config.LLM_PROXY`` to bare model names — confirm whether that is
        intentional.
        """
        messages = LLM._prepare_messages(messages, system_prompt)

        async def content_generator():
            try:
                response_stream = await acompletion(
                    model=model, messages=messages, stream=True, **kwargs
                )
                async for chunk in response_stream:
                    yield chunk
            except Exception as e:
                # Best-effort: report and end the stream quietly.
                print(f"Stream error: {e}")

        return ChatStreamResponse(content_generator(), model)

    @staticmethod
    async def quick_ask(
        model: str, prompt: str, system_prompt: Optional[str] = None, **kwargs
    ) -> str:
        """Like :meth:`ask`, but return just the text ("" on failure)."""
        response = await LLM.ask(model, prompt, system_prompt, **kwargs)
        return response.content if response.success else ""

    @staticmethod
    async def quick_chat(
        model: str,
        messages: Union[str, List[Dict]],
        system_prompt: Optional[str] = None,
        **kwargs,
    ) -> str:
        """Like :meth:`chat`, but return just the text ("" on failure)."""
        response = await LLM.chat(model, messages, system_prompt, **kwargs)
        return response.content if response.success else ""

    @staticmethod
    async def quick_stream(
        model: str,
        messages: Union[str, List[Dict]],
        system_prompt: Optional[str] = None,
        **kwargs,
    ) -> AsyncGenerator[str, None]:
        """Yield text deltas directly from a streaming completion."""
        messages = LLM._prepare_messages(messages, system_prompt)

        try:
            response_stream = await acompletion(
                model=model, messages=messages, stream=True, **kwargs
            )
            async for chunk in response_stream:
                if (
                    hasattr(chunk.choices[0].delta, "content")
                    and chunk.choices[0].delta.content
                ):
                    yield chunk.choices[0].delta.content
        except Exception as e:
            # Best-effort: report and end the stream quietly.
            print(f"Stream error: {e}")

    @staticmethod
    async def ask_with_role(
        model: str, prompt: str, role: str, **kwargs
    ) -> "ChatResponse":
        """Ask with *role* used as the system prompt."""
        return await LLM.ask(model, prompt, system_prompt=role, **kwargs)

    @staticmethod
    async def translate(
        model: str, text: str, target_lang: str = "中文", **kwargs
    ) -> "ChatResponse":
        """Translate *text* into *target_lang* (defaults to Chinese)."""
        system_prompt = f"你是一个专业的翻译助手,请将用户输入的文本翻译成{target_lang},只返回翻译结果。"
        return await LLM.ask(model, text, system_prompt=system_prompt, **kwargs)

    @staticmethod
    async def summarize(model: str, text: str, **kwargs) -> "ChatResponse":
        """Summarize *text* concisely and accurately."""
        system_prompt = (
            "你是一个专业的文本总结助手,请对用户提供的文本进行简洁准确的总结。"
        )
        return await LLM.ask(model, text, system_prompt=system_prompt, **kwargs)

    @staticmethod
    async def code_review(
        model: str, code: str, language: str = "Python", **kwargs
    ) -> "ChatResponse":
        """Review *code* written in *language* and suggest improvements."""
        system_prompt = f"你是一个专业的{language}代码审查专家,请对用户提供的代码进行审查,指出潜在问题并给出改进建议。"
        return await LLM.ask(model, code, system_prompt=system_prompt, **kwargs)
|
|
215
|
+
|
|
216
|
+
|
|
217
|
+
class MessageBuilder:
    """Helpers for constructing chat message dictionaries."""

    @staticmethod
    def create_message(role: str, content: str) -> Dict:
        """Build a single ``{"role", "content"}`` message dict."""
        return {"role": role, "content": content}

    @staticmethod
    def user(content: str) -> Dict:
        """Build a user-role message."""
        return MessageBuilder.create_message("user", content)

    @staticmethod
    def assistant(content: str) -> Dict:
        """Build an assistant-role message."""
        return MessageBuilder.create_message("assistant", content)

    @staticmethod
    def system(content: str) -> Dict:
        """Build a system-role message."""
        return MessageBuilder.create_message("system", content)

    @staticmethod
    def build_conversation(
        system_prompt: Optional[str] = None, *exchanges
    ) -> List[Dict]:
        """Build a conversation from ``(user, assistant)`` exchange tuples.

        Falsy entries within an exchange are skipped, so ``(question, None)``
        appends only the user turn.
        """
        conversation: List[Dict] = []

        if system_prompt:
            conversation.append(MessageBuilder.system(system_prompt))

        for pair in exchanges:
            user_turn = pair[0] if len(pair) >= 1 else None
            bot_turn = pair[1] if len(pair) >= 2 else None
            if user_turn:
                conversation.append(MessageBuilder.user(user_turn))
            if bot_turn:
                conversation.append(MessageBuilder.assistant(bot_turn))

        return conversation

    @staticmethod
    def add_message(messages: List[Dict], role: str, content: str) -> List[Dict]:
        """Return a new list extending *messages* with one more message."""
        return [*messages, MessageBuilder.create_message(role, content)]
|
|
254
|
+
|
|
255
|
+
|
|
256
|
+
async def example_usage():
    """Demonstrate the LLM facade: ask, chat, streaming, and quick helpers.

    Requires LITELLM_PROXY_API_KEY / LITELLM_PROXY_API_BASE in the
    environment (see the commented lines below).
    """
    # Model name already carries a provider prefix ("/"), so
    # _execute_completion does not prepend config.LLM_PROXY.
    model = "litellm_proxy/aws_cs4"

    import os

    # os.environ["LITELLM_PROXY_API_KEY"] = "sk-"
    # os.environ["LITELLM_PROXY_API_BASE"] = "https://"

    # Single-turn ask with a system prompt and a sampling temperature.
    response = await LLM.ask(
        model, "你好", system_prompt="你是一个友好的助手", temperature=0.7
    )
    print(f"回答: {response.content}")
    print(f"用时: {response.elapsed_time_ms}ms")
    print(f"Token: {response.total_tokens}")

    # Multi-turn conversation assembled from (user, assistant) exchanges;
    # the trailing (question, None) leaves the last user turn unanswered.
    messages = MessageBuilder.build_conversation(
        "你是一个helpful的助手",
        ("什么是Python?", "Python是一种编程语言..."),
        ("它有什么特点?", None),
    )

    chat_response = await LLM.chat(model, messages, temperature=0.5)
    print(f"对话回答: {chat_response.content}")

    # Streaming: print each text delta as it arrives.
    print("流式输出:")
    async for content in LLM.quick_stream(
        model, "请写一首关于春天的诗", system_prompt="你是一个诗人"
    ):
        print(content, end="", flush=True)
    print()

    # Convenience helpers: plain-text answer and translation.
    quick_answer = await LLM.quick_ask(model, "1+1等于几?")
    print(f"快速回答: {quick_answer}")

    translation = await LLM.translate(model, "Hello World")
    print(f"翻译结果: {translation.content}")


if __name__ == "__main__":
    import asyncio

    asyncio.run(example_usage())
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
# !/usr/bin/env python
|
|
2
|
+
# -*-coding:utf-8 -*-
|
|
3
|
+
|
|
4
|
+
"""
|
|
5
|
+
# Time :2025/12/23 17:27
|
|
6
|
+
# Author :Maxwell
|
|
7
|
+
# Description:
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from loguru import logger
|
|
11
|
+
from .core import setup_logger, get_logger_manager, shutdown_logger
|
|
12
|
+
from .context import (
|
|
13
|
+
set_request_id,
|
|
14
|
+
get_request_id,
|
|
15
|
+
set_user_id,
|
|
16
|
+
get_user_id,
|
|
17
|
+
set_trace_id,
|
|
18
|
+
get_trace_id,
|
|
19
|
+
set_span_id,
|
|
20
|
+
get_span_id,
|
|
21
|
+
set_context,
|
|
22
|
+
get_context,
|
|
23
|
+
get_all_context,
|
|
24
|
+
clear_context,
|
|
25
|
+
with_request_id,
|
|
26
|
+
with_user_context,
|
|
27
|
+
)
|
|
28
|
+
from .metrics import get_metrics
|
|
29
|
+
|
|
30
|
+
__all__ = [
    # Logger object
    "logger",

    # Core functionality
    "setup_logger",
    "get_logger_manager",
    "shutdown_logger",

    # Context management
    "set_request_id",
    "get_request_id",
    "set_user_id",
    "get_user_id",
    "set_trace_id",
    "get_trace_id",
    "set_span_id",
    "get_span_id",
    "set_context",
    "get_context",
    "get_all_context",
    "clear_context",
    "with_request_id",
    "with_user_context",

    # Metrics
    "get_metrics",
]
|
|
@@ -0,0 +1,191 @@
|
|
|
1
|
+
# !/usr/bin/env python
|
|
2
|
+
# -*-coding:utf-8 -*-
|
|
3
|
+
|
|
4
|
+
"""
|
|
5
|
+
# Time :2025/12/23 17:58
|
|
6
|
+
# Author :Maxwell
|
|
7
|
+
# Description:
|
|
8
|
+
"""
|
|
9
|
+
import contextvars
|
|
10
|
+
from typing import Dict, Any, Optional
|
|
11
|
+
from uuid import uuid4
|
|
12
|
+
|
|
13
|
+
# =================================================================
# Context variables
# =================================================================

# Per-request identifier.
request_id_var: contextvars.ContextVar[Optional[str]] = contextvars.ContextVar(
    'request_id', default=None
)

# Identifier of the acting user.
user_id_var: contextvars.ContextVar[Optional[str]] = contextvars.ContextVar(
    'user_id', default=None
)

# Distributed-tracing trace identifier.
trace_id_var: contextvars.ContextVar[Optional[str]] = contextvars.ContextVar(
    'trace_id', default=None
)

# Distributed-tracing span identifier.
span_id_var: contextvars.ContextVar[Optional[str]] = contextvars.ContextVar(
    'span_id', default=None
)

# Arbitrary user-supplied key/value context.
custom_context_var: contextvars.ContextVar[Dict[str, Any]] = contextvars.ContextVar(
    'custom_context', default={}
)


# =================================================================
# Context management functions
# =================================================================

def set_request_id(request_id: Optional[str] = None) -> str:
    """Bind a request ID to the current context.

    Args:
        request_id: Explicit ID to use; a fresh UUID4 is generated when None.

    Returns:
        The request ID that was bound.
    """
    rid = request_id if request_id is not None else str(uuid4())
    request_id_var.set(rid)
    return rid


def get_request_id() -> Optional[str]:
    """Return the current request ID, if any."""
    return request_id_var.get()


def set_user_id(user_id: str):
    """Bind the acting user's ID to the current context."""
    user_id_var.set(user_id)


def get_user_id() -> Optional[str]:
    """Return the current user ID, if any."""
    return user_id_var.get()


def set_trace_id(trace_id: str):
    """Bind a trace ID to the current context."""
    trace_id_var.set(trace_id)


def get_trace_id() -> Optional[str]:
    """Return the current trace ID, if any."""
    return trace_id_var.get()


def set_span_id(span_id: str):
    """Bind a span ID to the current context."""
    span_id_var.set(span_id)


def get_span_id() -> Optional[str]:
    """Return the current span ID, if any."""
    return span_id_var.get()


def set_context(key: str, value: Any):
    """Store *value* under *key* in the custom context.

    The backing dict is copied before mutation so sibling contexts (and
    the shared default) are never modified in place.

    Args:
        key: Entry name.
        value: Entry value.
    """
    updated = dict(custom_context_var.get())
    updated[key] = value
    custom_context_var.set(updated)


def get_context(key: str) -> Optional[Any]:
    """Return the custom-context value for *key* (None when absent)."""
    return custom_context_var.get().get(key)


def get_all_context() -> Dict[str, Any]:
    """Return every non-None context value merged into a single dict.

    Custom-context entries override the well-known keys on name collision.
    """
    merged = {
        "request_id": get_request_id(),
        "user_id": get_user_id(),
        "trace_id": get_trace_id(),
        "span_id": get_span_id(),
    }
    merged.update(custom_context_var.get())
    return {key: val for key, val in merged.items() if val is not None}


def clear_context():
    """Reset every context variable to its empty state."""
    for id_var in (request_id_var, user_id_var, trace_id_var, span_id_var):
        id_var.set(None)
    custom_context_var.set({})
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
# =================================================================
|
|
143
|
+
# 上下文装饰器
|
|
144
|
+
# =================================================================
|
|
145
|
+
|
|
146
|
+
from functools import wraps
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
def with_request_id(func):
    """Decorator that assigns a fresh request ID for the duration of a call.

    Usage:
        @with_request_id
        def my_function():
            logger.info("handling request")  # automatically carries request_id
    """

    @wraps(func)
    def _managed(*args, **kwargs):
        set_request_id()
        try:
            return func(*args, **kwargs)
        finally:
            # Always drop the generated ID, even when func raises.
            clear_context()

    return _managed
|
|
168
|
+
|
|
169
|
+
|
|
170
|
+
def with_user_context(user_id: str):
    """Decorator factory that binds *user_id* to the context during a call.

    Usage:
        @with_user_context("user_123")
        def my_function():
            logger.info("user action")  # automatically carries user_id
    """

    def decorator(func):
        @wraps(func)
        def _managed(*args, **kwargs):
            set_user_id(user_id)
            try:
                return func(*args, **kwargs)
            finally:
                # Always drop the bound user ID, even when func raises.
                clear_context()

        return _managed

    return decorator
|