aient-1.2.42-py3-none-any.whl → aient-1.2.43-py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- aient/core/request.py +29 -7
- aient/core/response.py +30 -1
- aient/core/utils.py +5 -1
- aient/models/chatgpt.py +2 -9
- {aient-1.2.42.dist-info → aient-1.2.43.dist-info}/METADATA +1 -1
- {aient-1.2.42.dist-info → aient-1.2.43.dist-info}/RECORD +9 -9
- {aient-1.2.42.dist-info → aient-1.2.43.dist-info}/WHEEL +0 -0
- {aient-1.2.42.dist-info → aient-1.2.43.dist-info}/licenses/LICENSE +0 -0
- {aient-1.2.42.dist-info → aient-1.2.43.dist-info}/top_level.txt +0 -0
aient/core/request.py
CHANGED

```diff
@@ -1015,9 +1015,16 @@ async def get_gpt_payload(request, engine, provider, api_key=None):
             for item in msg.content:
                 if item.type == "text":
                     text_message = await get_text_message(item.text, engine)
+                    if "v1/responses" in url:
+                        text_message["type"] = "input_text"
                     content.append(text_message)
                 elif item.type == "image_url" and provider.get("image", True) and "o1-mini" not in original_model:
                     image_message = await get_image_message(item.image_url.url, engine)
+                    if "v1/responses" in url:
+                        image_message = {
+                            "type": "input_image",
+                            "image_url": image_message["image_url"]["url"]
+                        }
                     content.append(image_message)
         else:
             content = msg.content
@@ -1049,10 +1056,16 @@ async def get_gpt_payload(request, engine, provider, api_key=None):
         system_msg = messages.pop(0)
         messages[0]["content"] = system_msg["content"] + messages[0]["content"]
 
-    payload = {
-        "model": original_model,
-        "messages": messages,
-    }
+    if "v1/responses" in url:
+        payload = {
+            "model": original_model,
+            "input": messages,
+        }
+    else:
+        payload = {
+            "model": original_model,
+            "messages": messages,
+        }
 
     miss_fields = [
         'model',
@@ -1085,11 +1098,20 @@ async def get_gpt_payload(request, engine, provider, api_key=None):
         "o3" in original_model or "o4" in original_model or \
         "gpt-oss" in original_model or "gpt-5" in original_model:
         if request.model.endswith("high"):
-            payload["reasoning_effort"] = "high"
+            if "v1/responses" in url:
+                payload["reasoning"] = {"effort": "high"}
+            else:
+                payload["reasoning_effort"] = "high"
         elif request.model.endswith("low"):
-            payload["reasoning_effort"] = "low"
+            if "v1/responses" in url:
+                payload["reasoning"] = {"effort": "low"}
+            else:
+                payload["reasoning_effort"] = "low"
         else:
-            payload["reasoning_effort"] = "medium"
+            if "v1/responses" in url:
+                payload["reasoning"] = {"effort": "medium"}
+            else:
+                payload["reasoning_effort"] = "medium"
 
     if "temperature" in payload:
         payload.pop("temperature")
```
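Taken together, `get_gpt_payload` now emits one of two payload shapes: the Chat Completions form (message history under `messages`, a flat `reasoning_effort` field) or the Responses API form (history under `input`, effort nested as `reasoning.effort`), chosen by whether the target URL contains `v1/responses`. A minimal sketch of the two shapes; the URLs, model name, and message content below are illustrative placeholders, not values from the diff:

```python
# Sketch of the payload split introduced above. build_payload is a hypothetical
# helper; only the key names mirror what get_gpt_payload now produces.
def build_payload(url: str, model: str, messages: list, effort: str = "medium") -> dict:
    if "v1/responses" in url:
        # Responses API: history under "input", effort nested under "reasoning".
        return {"model": model, "input": messages, "reasoning": {"effort": effort}}
    # Chat Completions: history under "messages", effort as a flat field.
    return {"model": model, "messages": messages, "reasoning_effort": effort}

chat = build_payload("https://example.com/v1/chat/completions", "gpt-5",
                     [{"role": "user", "content": "hi"}])
resp = build_payload("https://example.com/v1/responses", "gpt-5",
                     [{"role": "user", "content": [{"type": "input_text", "text": "hi"}]}])
print(chat["messages"], resp["input"])
```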
aient/core/response.py
CHANGED

```diff
@@ -213,6 +213,10 @@ async def fetch_gpt_response_stream(client, url, headers, payload, timeout):
 
         buffer = ""
         enter_buffer = ""
+
+        input_tokens = 0
+        output_tokens = 0
+
         async for chunk in response.aiter_text():
             buffer += chunk
             while "\n" in buffer:
@@ -221,12 +225,32 @@ async def fetch_gpt_response_stream(client, url, headers, payload, timeout):
                 if line.startswith(": keepalive"):
                     yield line + end_of_line
                     continue
-                if line and not line.startswith(":") and (result:=line.lstrip("data: ").strip()):
+                if line and not line.startswith(":") and (result:=line.lstrip("data: ").strip()) and not line.startswith("event: "):
                     if result.strip() == "[DONE]":
                         break
                     line = await asyncio.to_thread(json.loads, result)
                     line['id'] = f"chatcmpl-{random_str}"
 
+                    # v1/responses
+                    if line.get("type") == "response.reasoning_summary_text.delta" and line.get("delta"):
+                        sse_string = await generate_sse_response(timestamp, payload["model"], reasoning_content=line.get("delta"))
+                        yield sse_string
+                        continue
+                    elif line.get("type") == "response.output_text.delta" and line.get("delta"):
+                        sse_string = await generate_sse_response(timestamp, payload["model"], content=line.get("delta"))
+                        yield sse_string
+                        continue
+                    elif line.get("type") == "response.output_text.done":
+                        sse_string = await generate_sse_response(timestamp, payload["model"], stop="stop")
+                        yield sse_string
+                        continue
+                    elif line.get("type") == "response.completed":
+                        input_tokens = safe_get(line, "response", "usage", "input_tokens", default=0)
+                        output_tokens = safe_get(line, "response", "usage", "output_tokens", default=0)
+                        continue
+                    elif line.get("type", "").startswith("response."):
+                        continue
+
                     # 处理 <think> 标签
                     content = safe_get(line, "choices", 0, "delta", "content", default="")
                     if "<think>" in content:
@@ -322,6 +346,11 @@ async def fetch_gpt_response_stream(client, url, headers, payload, timeout):
                         del line["choices"][0]["message"]
                     json_line = await asyncio.to_thread(json.dumps, line)
                     yield "data: " + json_line.strip() + end_of_line
+
+        if input_tokens and output_tokens:
+            sse_string = await generate_sse_response(timestamp, payload["model"], None, None, None, None, None, total_tokens=input_tokens + output_tokens, prompt_tokens=input_tokens, completion_tokens=output_tokens)
+            yield sse_string
+
         yield "data: [DONE]" + end_of_line
 
 async def fetch_azure_response_stream(client, url, headers, payload, timeout):
```
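The streaming handler now recognizes Responses API SSE events (`response.reasoning_summary_text.delta`, `response.output_text.delta`, `response.output_text.done`, `response.completed`) alongside Chat Completions chunks, and holds back the usage counters captured from `response.completed` until the end of the stream. A rough sketch of that dispatch over already-parsed event dicts; `emit` is a plain-string stand-in for aient's `generate_sse_response`:

```python
# Simplified dispatch over Responses API events, mirroring the branch order above.
def emit(kind: str, value) -> str:
    # Stand-in for generate_sse_response: just label what would be streamed.
    return f"{kind}: {value}"

def handle_events(events):
    input_tokens = output_tokens = 0
    for line in events:
        etype = line.get("type", "")
        if etype == "response.reasoning_summary_text.delta" and line.get("delta"):
            yield emit("reasoning", line["delta"])
        elif etype == "response.output_text.delta" and line.get("delta"):
            yield emit("content", line["delta"])
        elif etype == "response.output_text.done":
            yield emit("stop", "stop")
        elif etype == "response.completed":
            usage = line.get("response", {}).get("usage", {})
            input_tokens = usage.get("input_tokens", 0)
            output_tokens = usage.get("output_tokens", 0)
        elif etype.startswith("response."):
            continue  # other Responses events carry nothing to forward
    if input_tokens and output_tokens:
        # Emitted once, after the text stream, like the usage chunk in the diff.
        yield emit("usage", {"prompt": input_tokens, "completion": output_tokens})

for out in handle_events([
    {"type": "response.output_text.delta", "delta": "Hello"},
    {"type": "response.output_text.done"},
    {"type": "response.completed", "response": {"usage": {"input_tokens": 3, "output_tokens": 2}}},
]):
    print(out)
```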
aient/core/utils.py
CHANGED

```diff
@@ -50,7 +50,11 @@ class BaseAPI:
             self.v1_models: str = urlunparse(parsed_url[:2] + ("v1/models",) + ("",) * 3)
         else:
             self.v1_models: str = urlunparse(parsed_url[:2] + (before_v1 + "models",) + ("",) * 3)
-        self.chat_url: str = urlunparse(parsed_url[:2] + (before_v1 + "chat/completions",) + ("",) * 3)
+
+        if "v1/responses" in parsed_url.path:
+            self.chat_url: str = urlunparse(parsed_url[:2] + ("v1/responses",) + ("",) * 3)
+        else:
+            self.chat_url: str = urlunparse(parsed_url[:2] + (before_v1 + "chat/completions",) + ("",) * 3)
         self.image_url: str = urlunparse(parsed_url[:2] + (before_v1 + "images/generations",) + ("",) * 3)
         if parsed_url.hostname == "dashscope.aliyuncs.com":
             self.audio_transcriptions: str = urlunparse(parsed_url[:2] + ("/api/v1/services/aigc/multimodal-generation/generation",) + ("",) * 3)
```
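Previously `BaseAPI` always rewrote the configured path to `.../chat/completions`; it now keeps an explicit `v1/responses` path intact. A self-contained sketch of the same `urlunparse`-based selection; the sample URLs and the default `before_v1` value are assumptions for illustration:

```python
from urllib.parse import urlparse, urlunparse

def chat_url(api_url: str, before_v1: str = "v1/") -> str:
    # before_v1 mimics BaseAPI's path prefix up to and including "v1/".
    parsed = urlparse(api_url)
    if "v1/responses" in parsed.path:
        # Preserve the Responses endpoint instead of forcing chat/completions.
        return urlunparse(parsed[:2] + ("v1/responses",) + ("",) * 3)
    return urlunparse(parsed[:2] + (before_v1 + "chat/completions",) + ("",) * 3)

print(chat_url("https://example.com/v1/responses"))         # .../v1/responses
print(chat_url("https://example.com/v1/chat/completions"))  # .../v1/chat/completions
```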
aient/models/chatgpt.py
CHANGED

```diff
@@ -12,7 +12,7 @@ from typing import Union, Optional, Callable
 from .base import BaseLLM
 from ..plugins.registry import registry
 from ..plugins import PLUGINS, get_tools_result_async, function_call_list, update_tools_config
-from ..utils.scripts import safe_get, async_generator_to_sync, parse_function_xml, parse_continuous_json, convert_functions_to_xml, remove_xml_tags_and_content, find_most_frequent_phrase
+from ..utils.scripts import safe_get, async_generator_to_sync, parse_function_xml, parse_continuous_json, convert_functions_to_xml, remove_xml_tags_and_content
 from ..core.request import prepare_request_payload
 from ..core.response import fetch_response_stream, fetch_response
 from ..architext.architext import Messages, SystemMessage, UserMessage, AssistantMessage, ToolCalls, ToolResults, Texts, RoleMessage, Images, Files
@@ -80,7 +80,7 @@ class TaskComplete(Exception):
         self.completion_message = message
         super().__init__(f"Task completed with message: {message}")
 
-
+# 结尾重复响应错误
 class RepetitiveResponseError(Exception):
     """Custom exception for detecting repetitive and meaningless generated strings."""
     def __init__(self, message, phrase, count):
@@ -446,13 +446,6 @@ class chatgpt(BaseLLM):
 
         if not full_response.strip() and not need_function_call:
             raise EmptyResponseError("Response is empty")
-        most_frequent_phrase, most_frequent_phrase_count = find_most_frequent_phrase(full_response)
-        if most_frequent_phrase_count > 100:
-            raise RepetitiveResponseError(
-                f"Detected repetitive and meaningless content. The phrase '{most_frequent_phrase}' appeared {most_frequent_phrase_count} times.",
-                most_frequent_phrase,
-                most_frequent_phrase_count
-            )
 
         if self.print_log:
             self.logger.info(f"total_tokens: {total_tokens}")
```
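For context, the guard removed above searched the finished response for its single most repeated phrase and raised `RepetitiveResponseError` once that phrase occurred more than 100 times. The diff does not show `find_most_frequent_phrase` itself; a hypothetical stand-in that counts repeated word trigrams might look like:

```python
# Hypothetical approximation of find_most_frequent_phrase; the real helper
# is not shown in this diff and may differ.
from collections import Counter

def find_most_frequent_phrase(text: str, n: int = 3) -> tuple[str, int]:
    words = text.split()
    if len(words) < n:
        return text, 0
    counts = Counter(" ".join(words[i:i + n]) for i in range(len(words) - n + 1))
    phrase, count = counts.most_common(1)[0]
    return phrase, count

phrase, count = find_most_frequent_phrase("ha ha " * 200)
assert count > 100  # the pattern the removed check used to reject
```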
{aient-1.2.42.dist-info → aient-1.2.43.dist-info}/RECORD
CHANGED

```diff
@@ -7,9 +7,9 @@ aient/architext/test/test_save_load.py,sha256=o8DqH6gDYZkFkQy-a7blqLtJTRj5e4a-Li
 aient/core/__init__.py,sha256=NxjebTlku35S4Dzr16rdSqSTWUvvwEeACe8KvHJnjPg,34
 aient/core/log_config.py,sha256=kz2_yJv1p-o3lUQOwA3qh-LSc3wMHv13iCQclw44W9c,274
 aient/core/models.py,sha256=KMlCRLjtq1wQHZTJGqnbWhPS2cHq6eLdnk7peKDrzR8,7490
-aient/core/request.py,sha256=
-aient/core/response.py,sha256=
-aient/core/utils.py,sha256=
+aient/core/request.py,sha256=u4ebaBDJTJbi-JfdXbU1Z_HkmDOk40WQH65aqTeg7Pw,77804
+aient/core/response.py,sha256=VYpXfF6RO3Y-fTZMGV2p-bcrd73BPAKlz33gQkOcqjE,38462
+aient/core/utils.py,sha256=9T6Ze9sMnsX4NBWeYCgY3AlZdhh6HFV1LI5SojzZars,31751
 aient/core/test/test_base_api.py,sha256=pWnycRJbuPSXKKU9AQjWrMAX1wiLC_014Qc9hh5C2Pw,524
 aient/core/test/test_geminimask.py,sha256=HFX8jDbNg_FjjgPNxfYaR-0-roUrOO-ND-FVsuxSoiw,13254
 aient/core/test/test_image.py,sha256=_T4peNGdXKBHHxyQNx12u-NTyFE8TlYI6NvvagsG2LE,319
@@ -17,7 +17,7 @@ aient/core/test/test_payload.py,sha256=8jBiJY1uidm1jzL-EiK0s6UGmW9XkdsuuKFGrwFhF
 aient/models/__init__.py,sha256=ZTiZgbfBPTjIPSKURE7t6hlFBVLRS9lluGbmqc1WjxQ,43
 aient/models/audio.py,sha256=FNW4lxG1IhxOU7L8mvcbaeC1nXk_lpUZQlg9ijQ0h_Q,1937
 aient/models/base.py,sha256=HWIGfa2A7OTccvHK0wG1-UlHB-yaWRC7hbi4oR1Mu1Y,7228
-aient/models/chatgpt.py,sha256=
+aient/models/chatgpt.py,sha256=d1ZE12AQriIl8DF6OQ3612_ieP5cbGdBhngYVUdIhKs,43814
 aient/plugins/__init__.py,sha256=p3KO6Aa3Lupos4i2SjzLQw1hzQTigOAfEHngsldrsyk,986
 aient/plugins/arXiv.py,sha256=yHjb6PS3GUWazpOYRMKMzghKJlxnZ5TX8z9F6UtUVow,1461
 aient/plugins/config.py,sha256=TGgZ5SnNKZ8MmdznrZ-TEq7s2ulhAAwTSKH89bci3dA,7079
@@ -33,8 +33,8 @@ aient/plugins/websearch.py,sha256=aPsBjUQ3zQ4gzNrbVq7BMh28ENj9h_fSAeJFF2h9TNk,15
 aient/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 aient/utils/prompt.py,sha256=ZvGAt_ImJ_CGbDnWgpsWskfSV5fCkpFKRpNQjYL7M7s,11100
 aient/utils/scripts.py,sha256=D_-BCLHV_PS9r6SLXsdEAyey4bVWte-jMMJJKSx0Pcg,42530
-aient-1.2.
-aient-1.2.
-aient-1.2.
-aient-1.2.
-aient-1.2.
+aient-1.2.43.dist-info/licenses/LICENSE,sha256=XNdbcWldt0yaNXXWB_Bakoqnxb3OVhUft4MgMA_71ds,1051
+aient-1.2.43.dist-info/METADATA,sha256=BjCDz94qcvrgMueCkAlkPDMWbWSiUYxPyFffXQxSYTQ,4842
+aient-1.2.43.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+aient-1.2.43.dist-info/top_level.txt,sha256=3oXzrP5sAVvyyqabpeq8A2_vfMtY554r4bVE-OHBrZk,6
+aient-1.2.43.dist-info/RECORD,,
```
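Each RECORD row has the form `path,sha256=<digest>,<size>`, where the digest is the file's SHA-256 hash encoded as URL-safe base64 with the trailing `=` padding stripped, as the wheel RECORD format specifies. A small sketch of computing such a row; the path and contents are illustrative:

```python
import base64
import hashlib

def record_row(path: str, data: bytes) -> str:
    # URL-safe base64 of the SHA-256 digest, without "=" padding.
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode()},{len(data)}"

print(record_row("aient/core/__init__.py", b"print('hello')\n"))
```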
{aient-1.2.42.dist-info → aient-1.2.43.dist-info}/WHEEL
File without changes

{aient-1.2.42.dist-info → aient-1.2.43.dist-info}/licenses/LICENSE
File without changes

{aient-1.2.42.dist-info → aient-1.2.43.dist-info}/top_level.txt
File without changes