aient 1.2.42.tar.gz → 1.2.44.tar.gz
This diff compares the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between versions as they appear in the public registry.
- {aient-1.2.42 → aient-1.2.44}/PKG-INFO +1 -1
- {aient-1.2.42 → aient-1.2.44}/aient/core/request.py +36 -14
- {aient-1.2.42 → aient-1.2.44}/aient/core/response.py +30 -1
- {aient-1.2.42 → aient-1.2.44}/aient/core/utils.py +5 -1
- {aient-1.2.42 → aient-1.2.44}/aient/models/chatgpt.py +2 -9
- {aient-1.2.42 → aient-1.2.44}/aient.egg-info/PKG-INFO +1 -1
- {aient-1.2.42 → aient-1.2.44}/pyproject.toml +1 -1
- {aient-1.2.42 → aient-1.2.44}/LICENSE +0 -0
- {aient-1.2.42 → aient-1.2.44}/README.md +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/__init__.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/architext/architext/__init__.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/architext/architext/core.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/architext/test/openai_client.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/architext/test/test.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/architext/test/test_save_load.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/core/__init__.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/core/log_config.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/core/models.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/core/test/test_base_api.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/core/test/test_geminimask.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/core/test/test_image.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/core/test/test_payload.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/models/__init__.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/models/audio.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/models/base.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/plugins/__init__.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/plugins/arXiv.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/plugins/config.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/plugins/excute_command.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/plugins/get_time.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/plugins/image.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/plugins/list_directory.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/plugins/read_image.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/plugins/readonly.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/plugins/registry.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/plugins/run_python.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/plugins/websearch.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/utils/__init__.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/utils/prompt.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient/utils/scripts.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient.egg-info/SOURCES.txt +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient.egg-info/dependency_links.txt +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient.egg-info/requires.txt +0 -0
- {aient-1.2.42 → aient-1.2.44}/aient.egg-info/top_level.txt +0 -0
- {aient-1.2.42 → aient-1.2.44}/setup.cfg +0 -0
- {aient-1.2.42 → aient-1.2.44}/test/test_Web_crawler.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/test/test_ddg_search.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/test/test_google_search.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/test/test_ollama.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/test/test_plugin.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/test/test_url.py +0 -0
- {aient-1.2.42 → aient-1.2.44}/test/test_whisper.py +0 -0
@@ -295,7 +295,7 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
             if key == request.model:
                 for k, v in value.items():
                     payload[k] = v
-            elif all(_model not in request.model.lower() for _model in model_dict.keys()):
+            elif all(_model not in request.model.lower() for _model in model_dict.keys()) and "-" not in key:
                 payload[key] = value
 
     return url, headers, payload
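The same one-line tightening recurs in every payload builder below (get_vertex_gemini_payload, get_gpt_payload, get_azure_payload, get_azure_databricks_payload, get_openrouter_payload, get_claude_payload): only hyphen-free keys are now treated as global parameter overrides, so entries keyed by other model names (which typically contain hyphens) are no longer copied into the payload. A minimal sketch of the merge rule with hypothetical override data (the real loop lives in aient/core/request.py):

    # Sketch only: hypothetical override data and model_dict contents.
    model_dict = {"gpt-4o": "gpt-4o", "o3-mini": "o3-mini"}

    def merge_overrides(payload, overrides, request_model):
        for key, value in overrides.items():
            if key == request_model:
                # Exact match: merge the per-model override dict into the payload.
                for k, v in value.items():
                    payload[k] = v
            elif all(_model not in request_model.lower() for _model in model_dict.keys()) and "-" not in key:
                # New guard: only hyphen-free keys count as global overrides,
                # so entries keyed by other model names are skipped.
                payload[key] = value
        return payload

    print(merge_overrides({"model": "mymodel"},
                          {"temperature": 0.2, "gpt-4o": {"top_p": 0.9}},
                          "mymodel"))
    # {'model': 'mymodel', 'temperature': 0.2}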
@@ -591,7 +591,7 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
             if key == request.model:
                 for k, v in value.items():
                     payload[k] = v
-            elif all(_model not in request.model.lower() for _model in model_dict.keys()):
+            elif all(_model not in request.model.lower() for _model in model_dict.keys()) and "-" not in key:
                 payload[key] = value
 
     return url, headers, payload
@@ -1015,9 +1015,16 @@ async def get_gpt_payload(request, engine, provider, api_key=None):
                 for item in msg.content:
                     if item.type == "text":
                         text_message = await get_text_message(item.text, engine)
+                        if "v1/responses" in url:
+                            text_message["type"] = "input_text"
                         content.append(text_message)
                     elif item.type == "image_url" and provider.get("image", True) and "o1-mini" not in original_model:
                         image_message = await get_image_message(item.image_url.url, engine)
+                        if "v1/responses" in url:
+                            image_message = {
+                                "type": "input_image",
+                                "image_url": image_message["image_url"]["url"]
+                            }
                         content.append(image_message)
             else:
                 content = msg.content
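For reference, a sketch of the content-part conversion this hunk performs. The example data is hypothetical; the target shapes follow the OpenAI Responses API input format (input_text, and input_image with a plain string URL):

    # Hypothetical Chat Completions content parts, converted the way the hunk does.
    text_message = {"type": "text", "text": "describe this image"}
    image_message = {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}}

    url = "https://api.openai.com/v1/responses"
    if "v1/responses" in url:
        # Responses API renames the text part...
        text_message["type"] = "input_text"
        # ...and flattens the image part: image_url becomes a plain string.
        image_message = {
            "type": "input_image",
            "image_url": image_message["image_url"]["url"],
        }

    print(text_message)   # {'type': 'input_text', 'text': 'describe this image'}
    print(image_message)  # {'type': 'input_image', 'image_url': 'https://example.com/cat.png'}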
@@ -1049,10 +1056,16 @@ async def get_gpt_payload(request, engine, provider, api_key=None):
         system_msg = messages.pop(0)
         messages[0]["content"] = system_msg["content"] + messages[0]["content"]
 
-    payload = {
-        "model": original_model,
-        "messages": messages,
-    }
+    if "v1/responses" in url:
+        payload = {
+            "model": original_model,
+            "input": messages,
+        }
+    else:
+        payload = {
+            "model": original_model,
+            "messages": messages,
+        }
 
     miss_fields = [
         'model',
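A sketch of the resulting top-level payload difference, with hypothetical values: the Responses API carries the conversation under input, Chat Completions under messages:

    # Hypothetical values; mirrors the if/else the hunk introduces.
    original_model = "gpt-5"
    messages = [{"role": "user", "content": "hello"}]

    def build_payload(url):
        if "v1/responses" in url:
            return {"model": original_model, "input": messages}
        return {"model": original_model, "messages": messages}

    assert "input" in build_payload("https://api.openai.com/v1/responses")
    assert "messages" in build_payload("https://api.openai.com/v1/chat/completions")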
@@ -1085,11 +1098,20 @@ async def get_gpt_payload(request, engine, provider, api_key=None):
             "o3" in original_model or "o4" in original_model or \
             "gpt-oss" in original_model or "gpt-5" in original_model:
         if request.model.endswith("high"):
-            payload["reasoning_effort"] = "high"
+            if "v1/responses" in url:
+                payload["reasoning"] = {"effort": "high"}
+            else:
+                payload["reasoning_effort"] = "high"
         elif request.model.endswith("low"):
-            payload["reasoning_effort"] = "low"
+            if "v1/responses" in url:
+                payload["reasoning"] = {"effort": "low"}
+            else:
+                payload["reasoning_effort"] = "low"
         else:
-            payload["reasoning_effort"] = "medium"
+            if "v1/responses" in url:
+                payload["reasoning"] = {"effort": "medium"}
+            else:
+                payload["reasoning_effort"] = "medium"
 
         if "temperature" in payload:
             payload.pop("temperature")
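A sketch of the two spellings of reasoning effort this hunk switches between (model names hypothetical): the Responses API nests the value as reasoning.effort, while Chat Completions uses the flat reasoning_effort field:

    # Model names are hypothetical; the suffix parsing mirrors the hunk.
    def apply_reasoning(payload, request_model, url):
        if request_model.endswith("high"):
            effort = "high"
        elif request_model.endswith("low"):
            effort = "low"
        else:
            effort = "medium"
        if "v1/responses" in url:
            payload["reasoning"] = {"effort": effort}   # Responses API: nested object
        else:
            payload["reasoning_effort"] = effort        # Chat Completions: flat field
        return payload

    print(apply_reasoning({}, "o3-high", "https://api.openai.com/v1/responses"))
    # {'reasoning': {'effort': 'high'}}
    print(apply_reasoning({}, "o3", "https://api.openai.com/v1/chat/completions"))
    # {'reasoning_effort': 'medium'}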
@@ -1127,7 +1149,7 @@ async def get_gpt_payload(request, engine, provider, api_key=None):
             if key == request.model:
                 for k, v in value.items():
                     payload[k] = v
-            elif all(_model not in request.model.lower() for _model in model_dict.keys()):
+            elif all(_model not in request.model.lower() for _model in model_dict.keys()) and "-" not in key:
                 payload[key] = value
 
     return url, headers, payload
@@ -1225,7 +1247,7 @@ async def get_azure_payload(request, engine, provider, api_key=None):
             if key == request.model:
                 for k, v in value.items():
                     payload[k] = v
-            elif all(_model not in request.model.lower() for _model in model_dict.keys()):
+            elif all(_model not in request.model.lower() for _model in model_dict.keys()) and "-" not in key:
                 payload[key] = value
 
     return url, headers, payload
@@ -1345,7 +1367,7 @@ async def get_azure_databricks_payload(request, engine, provider, api_key=None):
             if key == request.model:
                 for k, v in value.items():
                     payload[k] = v
-            elif all(_model not in request.model.lower() for _model in model_dict.keys()):
+            elif all(_model not in request.model.lower() for _model in model_dict.keys()) and "-" not in key:
                 payload[key] = value
 
     return url, headers, payload
@@ -1432,7 +1454,7 @@ async def get_openrouter_payload(request, engine, provider, api_key=None):
             if key == request.model:
                 for k, v in value.items():
                     payload[k] = v
-            elif all(_model not in request.model.lower() for _model in model_dict.keys()):
+            elif all(_model not in request.model.lower() for _model in model_dict.keys()) and "-" not in key:
                 payload[key] = value
 
     return url, headers, payload
@@ -1798,7 +1820,7 @@ async def get_claude_payload(request, engine, provider, api_key=None):
             if key == request.model:
                 for k, v in value.items():
                     payload[k] = v
-            elif all(_model not in request.model.lower() for _model in model_dict.keys()):
+            elif all(_model not in request.model.lower() for _model in model_dict.keys()) and "-" not in key:
                 payload[key] = value
 
     return url, headers, payload
@@ -213,6 +213,10 @@ async def fetch_gpt_response_stream(client, url, headers, payload, timeout):
 
         buffer = ""
         enter_buffer = ""
+
+        input_tokens = 0
+        output_tokens = 0
+
         async for chunk in response.aiter_text():
             buffer += chunk
             while "\n" in buffer:
@@ -221,12 +225,32 @@ async def fetch_gpt_response_stream(client, url, headers, payload, timeout):
                 if line.startswith(": keepalive"):
                     yield line + end_of_line
                     continue
-                if line and not line.startswith(":") and (result:=line.lstrip("data: ").strip()):
+                if line and not line.startswith(":") and (result:=line.lstrip("data: ").strip()) and not line.startswith("event: "):
                     if result.strip() == "[DONE]":
                         break
                     line = await asyncio.to_thread(json.loads, result)
                     line['id'] = f"chatcmpl-{random_str}"
 
+                    # v1/responses
+                    if line.get("type") == "response.reasoning_summary_text.delta" and line.get("delta"):
+                        sse_string = await generate_sse_response(timestamp, payload["model"], reasoning_content=line.get("delta"))
+                        yield sse_string
+                        continue
+                    elif line.get("type") == "response.output_text.delta" and line.get("delta"):
+                        sse_string = await generate_sse_response(timestamp, payload["model"], content=line.get("delta"))
+                        yield sse_string
+                        continue
+                    elif line.get("type") == "response.output_text.done":
+                        sse_string = await generate_sse_response(timestamp, payload["model"], stop="stop")
+                        yield sse_string
+                        continue
+                    elif line.get("type") == "response.completed":
+                        input_tokens = safe_get(line, "response", "usage", "input_tokens", default=0)
+                        output_tokens = safe_get(line, "response", "usage", "output_tokens", default=0)
+                        continue
+                    elif line.get("type", "").startswith("response."):
+                        continue
+
                     # Handle the <think> tag
                     content = safe_get(line, "choices", 0, "delta", "content", default="")
                     if "<think>" in content:
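A condensed sketch of how the new branch maps Responses API stream events onto the Chat-Completions-style chunks this function already yields (the event names come from the hunk; the payloads here are hypothetical):

    # Event names come from the hunk; the mapping summary is a sketch.
    def classify(event):
        etype = event.get("type", "")
        if etype == "response.reasoning_summary_text.delta":
            return "reasoning delta -> chunk with reasoning_content"
        if etype == "response.output_text.delta":
            return "text delta -> chunk with content"
        if etype == "response.output_text.done":
            return "end of text -> chunk with finish_reason 'stop'"
        if etype == "response.completed":
            return "usage -> record input/output tokens for the final chunk"
        if etype.startswith("response."):
            return "other response.* event -> ignored"
        return "no response.* type -> legacy chat.completion.chunk handling"

    print(classify({"type": "response.output_text.delta", "delta": "Hi"}))
    # text delta -> chunk with content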
@@ -322,6 +346,11 @@ async def fetch_gpt_response_stream(client, url, headers, payload, timeout):
                         del line["choices"][0]["message"]
                     json_line = await asyncio.to_thread(json.dumps, line)
                     yield "data: " + json_line.strip() + end_of_line
+
+        if input_tokens and output_tokens:
+            sse_string = await generate_sse_response(timestamp, payload["model"], None, None, None, None, None, total_tokens=input_tokens + output_tokens, prompt_tokens=input_tokens, completion_tokens=output_tokens)
+            yield sse_string
+
         yield "data: [DONE]" + end_of_line
 
 async def fetch_azure_response_stream(client, url, headers, payload, timeout):
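A sketch of the final usage chunk this addition emits once streaming ends. The exact output of generate_sse_response is not shown in this diff, so the shape below is an assumption modeled on OpenAI's chat.completion.chunk usage fields:

    # Assumed shape, modeled on OpenAI's usage reporting in streamed responses.
    input_tokens, output_tokens = 12, 34
    usage_chunk = {
        "object": "chat.completion.chunk",
        "choices": [],
        "usage": {
            "prompt_tokens": input_tokens,
            "completion_tokens": output_tokens,
            "total_tokens": input_tokens + output_tokens,
        },
    }
    print(usage_chunk["usage"])
    # {'prompt_tokens': 12, 'completion_tokens': 34, 'total_tokens': 46}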
@@ -50,7 +50,11 @@ class BaseAPI:
             self.v1_models: str = urlunparse(parsed_url[:2] + ("v1/models",) + ("",) * 3)
         else:
             self.v1_models: str = urlunparse(parsed_url[:2] + (before_v1 + "models",) + ("",) * 3)
-        self.chat_url: str = urlunparse(parsed_url[:2] + (before_v1 + "chat/completions",) + ("",) * 3)
+
+        if "v1/responses" in parsed_url.path:
+            self.chat_url: str = urlunparse(parsed_url[:2] + ("v1/responses",) + ("",) * 3)
+        else:
+            self.chat_url: str = urlunparse(parsed_url[:2] + (before_v1 + "chat/completions",) + ("",) * 3)
         self.image_url: str = urlunparse(parsed_url[:2] + (before_v1 + "images/generations",) + ("",) * 3)
         if parsed_url.hostname == "dashscope.aliyuncs.com":
             self.audio_transcriptions: str = urlunparse(parsed_url[:2] + ("/api/v1/services/aigc/multimodal-generation/generation",) + ("",) * 3)
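A simplified usage sketch of the new chat_url rule (standard library only; the real code in aient/core/utils.py also preserves any path prefix before v1 via before_v1, which this sketch omits):

    from urllib.parse import urlparse, urlunparse

    def derive_chat_url(api_base):
        parsed_url = urlparse(api_base)
        if "v1/responses" in parsed_url.path:
            # A base URL already pointing at the Responses endpoint is kept as-is.
            return urlunparse(parsed_url[:2] + ("v1/responses",) + ("",) * 3)
        return urlunparse(parsed_url[:2] + ("v1/chat/completions",) + ("",) * 3)

    print(derive_chat_url("https://api.openai.com/v1/responses"))
    # https://api.openai.com/v1/responses
    print(derive_chat_url("https://api.openai.com/v1"))
    # https://api.openai.com/v1/chat/completions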
@@ -12,7 +12,7 @@ from typing import Union, Optional, Callable
 from .base import BaseLLM
 from ..plugins.registry import registry
 from ..plugins import PLUGINS, get_tools_result_async, function_call_list, update_tools_config
-from ..utils.scripts import safe_get, async_generator_to_sync, parse_function_xml, parse_continuous_json, convert_functions_to_xml, remove_xml_tags_and_content
+from ..utils.scripts import safe_get, async_generator_to_sync, parse_function_xml, parse_continuous_json, convert_functions_to_xml, remove_xml_tags_and_content
 from ..core.request import prepare_request_payload
 from ..core.response import fetch_response_stream, fetch_response
 from ..architext.architext import Messages, SystemMessage, UserMessage, AssistantMessage, ToolCalls, ToolResults, Texts, RoleMessage, Images, Files
@@ -80,7 +80,7 @@ class TaskComplete(Exception):
         self.completion_message = message
         super().__init__(f"Task completed with message: {message}")
 
-
+# Repetitive trailing response error
 class RepetitiveResponseError(Exception):
     """Custom exception for detecting repetitive and meaningless generated strings."""
     def __init__(self, message, phrase, count):
@@ -446,13 +446,6 @@ class chatgpt(BaseLLM):
 
         if not full_response.strip() and not need_function_call:
             raise EmptyResponseError("Response is empty")
-        most_frequent_phrase, most_frequent_phrase_count = find_most_frequent_phrase(full_response)
-        if most_frequent_phrase_count > 100:
-            raise RepetitiveResponseError(
-                f"Detected repetitive and meaningless content. The phrase '{most_frequent_phrase}' appeared {most_frequent_phrase_count} times.",
-                most_frequent_phrase,
-                most_frequent_phrase_count
-            )
 
         if self.print_log:
             self.logger.info(f"total_tokens: {total_tokens}")