aient-1.0.29-py3-none-any.whl
- aient/__init__.py +1 -0
- aient/core/.git +1 -0
- aient/core/__init__.py +1 -0
- aient/core/log_config.py +6 -0
- aient/core/models.py +227 -0
- aient/core/request.py +1361 -0
- aient/core/response.py +531 -0
- aient/core/test/test_base_api.py +17 -0
- aient/core/test/test_image.py +15 -0
- aient/core/test/test_payload.py +92 -0
- aient/core/utils.py +655 -0
- aient/models/__init__.py +9 -0
- aient/models/audio.py +63 -0
- aient/models/base.py +270 -0
- aient/models/chatgpt.py +856 -0
- aient/models/claude.py +640 -0
- aient/models/duckduckgo.py +241 -0
- aient/models/gemini.py +357 -0
- aient/models/groq.py +268 -0
- aient/models/vertex.py +420 -0
- aient/plugins/__init__.py +32 -0
- aient/plugins/arXiv.py +48 -0
- aient/plugins/config.py +178 -0
- aient/plugins/image.py +72 -0
- aient/plugins/registry.py +116 -0
- aient/plugins/run_python.py +156 -0
- aient/plugins/today.py +19 -0
- aient/plugins/websearch.py +393 -0
- aient/utils/__init__.py +0 -0
- aient/utils/prompt.py +143 -0
- aient/utils/scripts.py +235 -0
- aient-1.0.29.dist-info/METADATA +119 -0
- aient-1.0.29.dist-info/RECORD +36 -0
- aient-1.0.29.dist-info/WHEEL +5 -0
- aient-1.0.29.dist-info/licenses/LICENSE +7 -0
- aient-1.0.29.dist-info/top_level.txt +1 -0
aient/__init__.py
ADDED
@@ -0,0 +1 @@
from .models import *
aient/core/.git
ADDED
@@ -0,0 +1 @@
gitdir: ../../../.git/modules/src/aient/core
aient/core/__init__.py
ADDED
@@ -0,0 +1 @@
# core package initialization file
aient/core/log_config.py
ADDED
@@ -0,0 +1,6 @@
import logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logger = logging.getLogger("uni-api")

logging.getLogger("httpx").setLevel(logging.CRITICAL)
logging.getLogger("watchfiles.main").setLevel(logging.CRITICAL)
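The module above configures a shared logger named "uni-api" at INFO level and silences the noisy httpx and watchfiles loggers. A minimal usage sketch (not part of the package; it only assumes the import path implied by the file layout in this diff):

# Sketch: using the shared logger configured in aient/core/log_config.py.
from aient.core.log_config import logger

logger.info("request dispatched")        # emitted with the configured format at INFO level
logger.debug("payload details omitted")  # suppressed: basicConfig sets the level to INFO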
aient/core/models.py
ADDED
@@ -0,0 +1,227 @@
from io import IOBase
from pydantic import BaseModel, Field, model_validator, ConfigDict
from typing import List, Dict, Optional, Union, Tuple, Literal, Any

class FunctionParameter(BaseModel):
    type: str
    properties: Dict[str, Dict[str, Any]]
    required: List[str] = None
    defs: Dict[str, Any] = Field(default=None, alias="$defs")

    class Config:
        population_by_name = True

class Function(BaseModel):
    name: str
    description: str = Field(default=None)
    parameters: Optional[FunctionParameter] = Field(default=None, exclude=None)

class Tool(BaseModel):
    type: str
    function: Function

    @classmethod
    def parse_raw(cls, json_str: str) -> 'Tool':
        """Parse a Tool object from a JSON string."""
        return cls.model_validate_json(json_str)

class FunctionCall(BaseModel):
    name: str
    arguments: str

class ToolCall(BaseModel):
    id: str
    type: str
    function: FunctionCall

class ImageUrl(BaseModel):
    url: str

class ContentItem(BaseModel):
    type: str
    text: Optional[str] = None
    image_url: Optional[ImageUrl] = None

class Message(BaseModel):
    role: str
    name: Optional[str] = None
    arguments: Optional[str] = None
    content: Optional[Union[str, List[ContentItem]]] = None
    tool_calls: Optional[List[ToolCall]] = None

class Message(BaseModel):
    role: str
    name: Optional[str] = None
    content: Optional[Union[str, List[ContentItem]]] = None
    tool_calls: Optional[List[ToolCall]] = None
    tool_call_id: Optional[str] = None

    class Config:
        extra = "allow"  # allow extra fields

class FunctionChoice(BaseModel):
    name: str

class ToolChoice(BaseModel):
    type: str
    function: Optional[FunctionChoice] = None

class BaseRequest(BaseModel):
    request_type: Optional[Literal["chat", "image", "audio", "moderation"]] = Field(default=None, exclude=True)

import warnings
warnings.filterwarnings("ignore", category=UserWarning, message=".*shadows an attribute.*")

class JsonSchema(BaseModel):
    name: str
    schema: Dict[str, Any] = Field(validation_alias='schema')

    model_config = ConfigDict(protected_namespaces=())

class ResponseFormat(BaseModel):
    type: Literal["text", "json_object", "json_schema"]
    json_schema: Optional[JsonSchema] = None

class Thinking(BaseModel):
    budget_tokens: Optional[int] = None
    type: Optional[Literal["enabled", "disabled"]] = None

class RequestModel(BaseRequest):
    model: str
    messages: List[Message]
    logprobs: Optional[bool] = None
    top_logprobs: Optional[int] = None
    stream: Optional[bool] = None
    include_usage: Optional[bool] = None
    temperature: Optional[float] = 0.5
    top_p: Optional[float] = 1.0
    max_tokens: Optional[int] = None
    max_completion_tokens: Optional[int] = None
    presence_penalty: Optional[float] = 0.0
    frequency_penalty: Optional[float] = 0.0
    n: Optional[int] = 1
    user: Optional[str] = None
    tool_choice: Optional[Union[str, ToolChoice]] = None
    tools: Optional[List[Tool]] = None
    response_format: Optional[ResponseFormat] = None
    thinking: Optional[Thinking] = None

    def get_last_text_message(self) -> Optional[str]:
        for message in reversed(self.messages):
            if message.content:
                if isinstance(message.content, str):
                    return message.content
                elif isinstance(message.content, list):
                    for item in reversed(message.content):
                        if item.type == "text" and item.text:
                            return item.text
        return ""

    def model_dump(self, **kwargs):
        data = super().model_dump(**kwargs)

        # Inspect and clean up the tools field
        if 'tools' in data and data['tools']:
            for tool in data['tools']:
                if 'function' in tool:
                    function_data = tool['function']
                    # Remove parameters when it is empty or has no properties
                    if 'parameters' in function_data and (
                        function_data['parameters'] is None or
                        not function_data['parameters'].get('properties')
                    ):
                        function_data.pop('parameters', None)

        return data

class ImageGenerationRequest(BaseRequest):
    prompt: str
    model: Optional[str] = "dall-e-3"
    n: Optional[int] = 1
    response_format: Optional[str] = "url"
    size: Optional[str] = "1024x1024"
    stream: bool = False

class EmbeddingRequest(BaseRequest):
    input: Union[str, List[Union[str, int, List[int]]]]  # supports a string or an array
    model: str
    encoding_format: Optional[str] = "float"
    dimensions: Optional[int] = None
    user: Optional[str] = None
    stream: bool = False

class AudioTranscriptionRequest(BaseRequest):
    file: Tuple[str, IOBase, str]
    model: str
    language: Optional[str] = None
    prompt: Optional[str] = None
    response_format: Optional[str] = None
    temperature: Optional[float] = None
    stream: bool = False

    class Config:
        arbitrary_types_allowed = True

class ModerationRequest(BaseRequest):
    input: Union[str, List[str]]
    model: Optional[str] = "text-moderation-latest"
    stream: bool = False

class TextToSpeechRequest(BaseRequest):
    model: str
    input: str
    voice: str
    response_format: Optional[str] = "mp3"
    speed: Optional[float] = 1.0
    stream: Optional[bool] = False  # Add this line

class UnifiedRequest(BaseModel):
    data: Union[RequestModel, ImageGenerationRequest, AudioTranscriptionRequest, ModerationRequest, EmbeddingRequest, TextToSpeechRequest]

    @model_validator(mode='before')
    @classmethod
    def set_request_type(cls, values):
        if isinstance(values, dict):
            if "messages" in values:
                values["data"] = RequestModel(**values)
                values["data"].request_type = "chat"
            elif "prompt" in values:
                values["data"] = ImageGenerationRequest(**values)
                values["data"].request_type = "image"
            elif "file" in values:
                values["data"] = AudioTranscriptionRequest(**values)
                values["data"].request_type = "audio"
            elif "tts" in values.get("model", ""):
                values["data"] = TextToSpeechRequest(**values)
                values["data"].request_type = "tts"
            elif "text-embedding" in values.get("model", ""):
                values["data"] = EmbeddingRequest(**values)
                values["data"].request_type = "embedding"
            elif "input" in values:
                values["data"] = ModerationRequest(**values)
                values["data"].request_type = "moderation"
            else:
                raise ValueError("Unable to determine the request type")
        return values

if __name__ == "__main__":
    # Example JSON string
    json_str = '''
    {
        "type": "function",
        "function": {
            "name": "clock-time____getCurrentTime____standalone",
            "description": "获取当前时间",
            "parameters": {
                "type": "object",
                "properties": {}
            }
        }
    }
    '''

    # Parse the JSON string into a Tool object
    tool = Tool.parse_raw(json_str)

    # The parameters field will be excluded automatically
    print(tool.model_dump(exclude_unset=True))
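For orientation, the UnifiedRequest model above routes an incoming payload to a concrete request model by inspecting its keys before validation: "messages" selects RequestModel (chat), "prompt" selects ImageGenerationRequest (image), "file" selects AudioTranscriptionRequest (audio), and so on. A minimal sketch of that dispatch follows; the payload values are illustrative and not taken from the package's own tests.

# Sketch: UnifiedRequest picks the concrete request model from the payload shape.
from aient.core.models import UnifiedRequest

chat = UnifiedRequest(**{
    "model": "gpt-4o",
    "messages": [{"role": "user", "content": "hello"}],
})
print(type(chat.data).__name__, chat.data.request_type)    # RequestModel chat

image = UnifiedRequest(**{
    "model": "dall-e-3",
    "prompt": "a lighthouse at dusk",
})
print(type(image.data).__name__, image.data.request_type)  # ImageGenerationRequest image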