aient 1.0.29__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aient/__init__.py +1 -0
- aient/core/.git +1 -0
- aient/core/__init__.py +1 -0
- aient/core/log_config.py +6 -0
- aient/core/models.py +227 -0
- aient/core/request.py +1361 -0
- aient/core/response.py +531 -0
- aient/core/test/test_base_api.py +17 -0
- aient/core/test/test_image.py +15 -0
- aient/core/test/test_payload.py +92 -0
- aient/core/utils.py +655 -0
- aient/models/__init__.py +9 -0
- aient/models/audio.py +63 -0
- aient/models/base.py +270 -0
- aient/models/chatgpt.py +856 -0
- aient/models/claude.py +640 -0
- aient/models/duckduckgo.py +241 -0
- aient/models/gemini.py +357 -0
- aient/models/groq.py +268 -0
- aient/models/vertex.py +420 -0
- aient/plugins/__init__.py +32 -0
- aient/plugins/arXiv.py +48 -0
- aient/plugins/config.py +178 -0
- aient/plugins/image.py +72 -0
- aient/plugins/registry.py +116 -0
- aient/plugins/run_python.py +156 -0
- aient/plugins/today.py +19 -0
- aient/plugins/websearch.py +393 -0
- aient/utils/__init__.py +0 -0
- aient/utils/prompt.py +143 -0
- aient/utils/scripts.py +235 -0
- aient-1.0.29.dist-info/METADATA +119 -0
- aient-1.0.29.dist-info/RECORD +36 -0
- aient-1.0.29.dist-info/WHEEL +5 -0
- aient-1.0.29.dist-info/licenses/LICENSE +7 -0
- aient-1.0.29.dist-info/top_level.txt +1 -0
aient/models/audio.py
ADDED
@@ -0,0 +1,63 @@
import os
import requests
import json
from .base import BaseLLM

API = os.environ.get('API', None)
API_URL = os.environ.get('API_URL', None)

class whisper(BaseLLM):
    def __init__(
        self,
        api_key: str,
        api_url: str = (os.environ.get("API_URL") or "https://api.openai.com/v1/audio/transcriptions"),
        timeout: float = 20,
    ):
        super().__init__(api_key, api_url=api_url, timeout=timeout)
        self.engine: str = "whisper-1"

    def generate(
        self,
        audio_file: bytes,
        model: str = "whisper-1",
        **kwargs,
    ):
        url = self.api_url.audio_transcriptions
        headers = {"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"}

        files = {
            "file": ("audio.mp3", audio_file, "audio/mpeg")
        }

        data = {
            "model": os.environ.get("AUDIO_MODEL_NAME") or model or self.engine,
        }
        try:
            response = self.session.post(
                url,
                headers=headers,
                data=data,
                files=files,
                timeout=kwargs.get("timeout", self.timeout),
                stream=True,
            )
        # requests' ConnectionError is not a subclass of the builtin one,
        # so catch the requests exception explicitly.
        except requests.exceptions.ConnectionError:
            print("Connection error; check the server status or your network connection.")
            return
        except requests.exceptions.ReadTimeout as e:
            print(f"Request timed out; check your network connection or increase the timeout. {e}")
            return
        except Exception as e:
            print(f"An unexpected error occurred: {e}")
            return

        if response.status_code != 200:
            raise Exception(f"{response.status_code} {response.reason} {response.text}")
        json_data = json.loads(response.text)
        text = json_data["text"]
        return text

def audio_transcriptions(audio_file):
    dallbot = whisper(api_key=f"{API}")
    # generate() returns the transcript as a plain string, so return it
    # directly (iterating over it would yield single characters).
    return dallbot.generate(audio_file)
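
For context, a minimal usage sketch of this module (not part of the package): it assumes aient is installed, the API environment variable holds a valid key for an OpenAI-compatible endpoint, and speech.mp3 is a local audio file. The audio_transcriptions endpoint attribute is provided by the BaseAPI helper in aient/core/utils.py.

# Hypothetical usage sketch; the file name and environment variables are
# assumptions for illustration, not part of the package.
import os

from aient.models.audio import whisper

bot = whisper(api_key=os.environ["API"])   # endpoint defaults to API_URL or the OpenAI URL

with open("speech.mp3", "rb") as f:
    transcript = bot.generate(f.read())    # uploaded as audio/mpeg; returns the "text" field

print(transcript)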
aient/models/base.py
ADDED
@@ -0,0 +1,270 @@
import os
import httpx
import requests
from pathlib import Path
from collections import defaultdict

from ..utils import prompt
from ..core.utils import BaseAPI

class BaseLLM:
    def __init__(
        self,
        api_key: str = None,
        engine: str = os.environ.get("GPT_ENGINE") or "gpt-3.5-turbo",
        api_url: str = (os.environ.get("API_URL", None) or "https://api.openai.com/v1/chat/completions"),
        system_prompt: str = prompt.chatgpt_system_prompt,
        proxy: str = None,
        timeout: float = 600,
        max_tokens: int = None,
        temperature: float = 0.5,
        top_p: float = 1.0,
        presence_penalty: float = 0.0,
        frequency_penalty: float = 0.0,
        reply_count: int = 1,
        truncate_limit: int = None,
        use_plugins: bool = True,
        print_log: bool = False,
    ) -> None:
        self.api_key: str = api_key
        self.engine: str = engine
        self.api_url: str = BaseAPI(api_url or "https://api.openai.com/v1/chat/completions")
        self.system_prompt: str = system_prompt
        self.max_tokens: int = max_tokens
        self.truncate_limit: int = truncate_limit
        self.temperature: float = temperature
        self.top_p: float = top_p
        self.presence_penalty: float = presence_penalty
        self.frequency_penalty: float = frequency_penalty
        self.reply_count: int = reply_count
        # Per-engine default completion budget when max_tokens is not given.
        self.max_tokens: int = max_tokens or (
            4096
            if "gpt-4-1106-preview" in engine or "gpt-4-0125-preview" in engine or "gpt-4-turbo" in engine or "gpt-3.5-turbo-1106" in engine or "claude" in engine or "gpt-4o" in engine
            else 31000
            if "gpt-4-32k" in engine
            else 7000
            if "gpt-4" in engine
            else 16385
            if "gpt-3.5-turbo-16k" in engine
            # else 99000
            # if "claude-2.1" in engine
            else 4000
        )
        # Per-engine default history window when truncate_limit is not given.
        self.truncate_limit: int = truncate_limit or (
            127500
            if "gpt-4-1106-preview" in engine or "gpt-4-0125-preview" in engine or "gpt-4-turbo" in engine or "gpt-4o" in engine
            else 30500
            if "gpt-4-32k" in engine
            else 6500
            if "gpt-4" in engine
            else 14500
            if "gpt-3.5-turbo-16k" in engine or "gpt-3.5-turbo-1106" in engine
            else 98500
            if "claude-2.1" in engine
            else 3500
        )
        self.timeout: float = timeout
        self.proxy = proxy
        self.session = requests.Session()
        self.session.proxies.update(
            {
                "http": proxy,
                "https": proxy,
            },
        )
        if proxy := (
            proxy or os.environ.get("all_proxy") or os.environ.get("ALL_PROXY") or None
        ):
            if "socks5h" not in proxy:
                self.aclient = httpx.AsyncClient(
                    follow_redirects=True,
                    proxies=proxy,
                    timeout=timeout,
                )
            else:
                self.aclient = httpx.AsyncClient(
                    follow_redirects=True,
                    timeout=timeout,
                )

        self.conversation: dict[str, list[dict]] = {
            "default": [
                {
                    "role": "system",
                    "content": system_prompt,
                },
            ],
        }
        # Note: this overrides the per-engine truncate_limit computed above.
        self.truncate_limit: int = 100000
        self.tokens_usage = defaultdict(int)
        self.function_calls_counter = {}
        self.function_call_max_loop = 10
        self.use_plugins = use_plugins
        self.print_log: bool = print_log

    def add_to_conversation(
        self,
        message: list,
        role: str,
        convo_id: str = "default",
        function_name: str = "",
    ) -> None:
        """
        Add a message to the conversation
        """
        pass

    def __truncate_conversation(self, convo_id: str = "default") -> None:
        """
        Truncate the conversation
        """
        pass

    def truncate_conversation(
        self,
        prompt: str,
        role: str = "user",
        convo_id: str = "default",
        model: str = "",
        pass_history: int = 9999,
        **kwargs,
    ) -> None:
        """
        Truncate the conversation
        """
        pass

    def extract_values(self, obj):
        pass

    def get_token_count(self, convo_id: str = "default") -> int:
        """
        Get token count
        """
        pass

    def get_message_token(self, url, json_post):
        pass

    def get_post_body(
        self,
        prompt: str,
        role: str = "user",
        convo_id: str = "default",
        model: str = "",
        pass_history: int = 9999,
        **kwargs,
    ):
        pass

    def get_max_tokens(self, convo_id: str) -> int:
        """
        Get max tokens
        """
        pass

    def ask_stream(
        self,
        prompt: list,
        role: str = "user",
        convo_id: str = "default",
        model: str = "",
        pass_history: int = 9999,
        function_name: str = "",
        **kwargs,
    ):
        """
        Ask a question
        """
        pass

    async def ask_stream_async(
        self,
        prompt: list,
        role: str = "user",
        convo_id: str = "default",
        model: str = "",
        pass_history: int = 9999,
        function_name: str = "",
        **kwargs,
    ):
        """
        Ask a question
        """
        pass

    async def ask_async(
        self,
        prompt: str,
        role: str = "user",
        convo_id: str = "default",
        model: str = "",
        pass_history: int = 9999,
        **kwargs,
    ) -> str:
        """
        Non-streaming ask
        """
        response = ""
        async for chunk in self.ask_stream_async(
            prompt=prompt,
            role=role,
            convo_id=convo_id,
            model=model or self.engine,
            pass_history=pass_history,
            **kwargs,
        ):
            response += chunk
        # full_response: str = "".join([r async for r in response])
        full_response: str = "".join(response)
        return full_response

    def ask(
        self,
        prompt: str,
        role: str = "user",
        convo_id: str = "default",
        model: str = "",
        pass_history: int = 0,
        **kwargs,
    ) -> str:
        """
        Non-streaming ask
        """
        response = self.ask_stream(
            prompt=prompt,
            role=role,
            convo_id=convo_id,
            model=model or self.engine,
            pass_history=pass_history,
            **kwargs,
        )
        full_response: str = "".join(response)
        return full_response

    def rollback(self, n: int = 1, convo_id: str = "default") -> None:
        """
        Rollback the conversation
        """
        for _ in range(n):
            self.conversation[convo_id].pop()

    def reset(self, convo_id: str = "default", system_prompt: str = None) -> None:
        """
        Reset the conversation
        """
        self.conversation[convo_id] = [
            {"role": "system", "content": system_prompt or self.system_prompt},
        ]

    def save(self, file: str, *keys: str) -> None:
        """
        Save the Chatbot configuration to a JSON file
        """
        pass

    def load(self, file: Path, *keys_: str) -> None:
        """
        Load the Chatbot configuration from a JSON file
        """
        pass
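
To show how this base class is meant to be extended, here is a hedged sketch (EchoLLM and the dummy key are illustrative, not part of the package): ask() and ask_async() join the chunks yielded by ask_stream() and ask_stream_async(), so a concrete model only needs to implement the streaming methods. It also demonstrates how the per-engine defaults in __init__ resolve.

# Hypothetical subclass sketch; no network request is made.
from aient.models.base import BaseLLM

class EchoLLM(BaseLLM):
    def ask_stream(self, prompt, role="user", convo_id="default", model="",
                   pass_history=9999, function_name="", **kwargs):
        # A real subclass would POST to self.api_url and yield text chunks;
        # this one just echoes the prompt in two pieces.
        yield "echo: "
        yield str(prompt)

bot = EchoLLM(api_key="sk-test", engine="gpt-4o")   # dummy key
print(bot.ask("hello"))               # -> "echo: hello" ("".join over the stream)

# Engine-based defaults resolve through the cascade in __init__:
assert bot.max_tokens == 4096         # matched by the "gpt-4o" branch
assert bot.truncate_limit == 100000   # cascade picks 127500, but the final
                                      # assignment in __init__ overrides it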