aient 1.2.42 → 1.2.44 (aient-1.2.42-py3-none-any.whl → aient-1.2.44-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
aient/core/request.py CHANGED
@@ -295,7 +295,7 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
295
295
  if key == request.model:
296
296
  for k, v in value.items():
297
297
  payload[k] = v
298
- elif all(_model not in request.model.lower() for _model in ["gemini", "gpt", "claude", "deepseek"]) and "-" not in key:
298
+ elif all(_model not in request.model.lower() for _model in model_dict.keys()) and "-" not in key:
299
299
  payload[key] = value
300
300
 
301
301
  return url, headers, payload
@@ -591,7 +591,7 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
591
591
  if key == request.model:
592
592
  for k, v in value.items():
593
593
  payload[k] = v
594
- elif all(_model not in request.model.lower() for _model in ["gemini", "gpt", "claude"]):
594
+ elif all(_model not in request.model.lower() for _model in model_dict.keys()) and "-" not in key:
595
595
  payload[key] = value
596
596
 
597
597
  return url, headers, payload
@@ -1015,9 +1015,16 @@ async def get_gpt_payload(request, engine, provider, api_key=None):
1015
1015
  for item in msg.content:
1016
1016
  if item.type == "text":
1017
1017
  text_message = await get_text_message(item.text, engine)
1018
+ if "v1/responses" in url:
1019
+ text_message["type"] = "input_text"
1018
1020
  content.append(text_message)
1019
1021
  elif item.type == "image_url" and provider.get("image", True) and "o1-mini" not in original_model:
1020
1022
  image_message = await get_image_message(item.image_url.url, engine)
1023
+ if "v1/responses" in url:
1024
+ image_message = {
1025
+ "type": "input_image",
1026
+ "image_url": image_message["image_url"]["url"]
1027
+ }
1021
1028
  content.append(image_message)
1022
1029
  else:
1023
1030
  content = msg.content
@@ -1049,10 +1056,16 @@ async def get_gpt_payload(request, engine, provider, api_key=None):
1049
1056
  system_msg = messages.pop(0)
1050
1057
  messages[0]["content"] = system_msg["content"] + messages[0]["content"]
1051
1058
 
1052
- payload = {
1053
- "model": original_model,
1054
- "messages": messages,
1055
- }
1059
+ if "v1/responses" in url:
1060
+ payload = {
1061
+ "model": original_model,
1062
+ "input": messages,
1063
+ }
1064
+ else:
1065
+ payload = {
1066
+ "model": original_model,
1067
+ "messages": messages,
1068
+ }
1056
1069
 
1057
1070
  miss_fields = [
1058
1071
  'model',
@@ -1085,11 +1098,20 @@ async def get_gpt_payload(request, engine, provider, api_key=None):
1085
1098
  "o3" in original_model or "o4" in original_model or \
1086
1099
  "gpt-oss" in original_model or "gpt-5" in original_model:
1087
1100
  if request.model.endswith("high"):
1088
- payload["reasoning_effort"] = "high"
1101
+ if "v1/responses" in url:
1102
+ payload["reasoning"] = {"effort": "high"}
1103
+ else:
1104
+ payload["reasoning_effort"] = "high"
1089
1105
  elif request.model.endswith("low"):
1090
- payload["reasoning_effort"] = "low"
1106
+ if "v1/responses" in url:
1107
+ payload["reasoning"] = {"effort": "low"}
1108
+ else:
1109
+ payload["reasoning_effort"] = "low"
1091
1110
  else:
1092
- payload["reasoning_effort"] = "medium"
1111
+ if "v1/responses" in url:
1112
+ payload["reasoning"] = {"effort": "medium"}
1113
+ else:
1114
+ payload["reasoning_effort"] = "medium"
1093
1115
 
1094
1116
  if "temperature" in payload:
1095
1117
  payload.pop("temperature")
@@ -1127,7 +1149,7 @@ async def get_gpt_payload(request, engine, provider, api_key=None):
1127
1149
  if key == request.model:
1128
1150
  for k, v in value.items():
1129
1151
  payload[k] = v
1130
- elif all(_model not in request.model.lower() for _model in ["gemini", "gpt", "claude"]):
1152
+ elif all(_model not in request.model.lower() for _model in model_dict.keys()) and "-" not in key:
1131
1153
  payload[key] = value
1132
1154
 
1133
1155
  return url, headers, payload
@@ -1225,7 +1247,7 @@ async def get_azure_payload(request, engine, provider, api_key=None):
1225
1247
  if key == request.model:
1226
1248
  for k, v in value.items():
1227
1249
  payload[k] = v
1228
- elif all(_model not in request.model.lower() for _model in ["gemini", "gpt", "claude"]):
1250
+ elif all(_model not in request.model.lower() for _model in model_dict.keys()) and "-" not in key:
1229
1251
  payload[key] = value
1230
1252
 
1231
1253
  return url, headers, payload
@@ -1345,7 +1367,7 @@ async def get_azure_databricks_payload(request, engine, provider, api_key=None):
1345
1367
  if key == request.model:
1346
1368
  for k, v in value.items():
1347
1369
  payload[k] = v
1348
- elif all(_model not in request.model.lower() for _model in ["gemini", "gpt", "claude"]):
1370
+ elif all(_model not in request.model.lower() for _model in model_dict.keys()) and "-" not in key:
1349
1371
  payload[key] = value
1350
1372
 
1351
1373
  return url, headers, payload
@@ -1432,7 +1454,7 @@ async def get_openrouter_payload(request, engine, provider, api_key=None):
1432
1454
  if key == request.model:
1433
1455
  for k, v in value.items():
1434
1456
  payload[k] = v
1435
- elif all(_model not in request.model.lower() for _model in ["gemini", "gpt", "claude"]):
1457
+ elif all(_model not in request.model.lower() for _model in model_dict.keys()) and "-" not in key:
1436
1458
  payload[key] = value
1437
1459
 
1438
1460
  return url, headers, payload
@@ -1798,7 +1820,7 @@ async def get_claude_payload(request, engine, provider, api_key=None):
1798
1820
  if key == request.model:
1799
1821
  for k, v in value.items():
1800
1822
  payload[k] = v
1801
- elif all(_model not in request.model.lower() for _model in ["gemini", "gpt", "claude"]):
1823
+ elif all(_model not in request.model.lower() for _model in model_dict.keys()) and "-" not in key:
1802
1824
  payload[key] = value
1803
1825
 
1804
1826
  return url, headers, payload
aient/core/response.py CHANGED
@@ -213,6 +213,10 @@ async def fetch_gpt_response_stream(client, url, headers, payload, timeout):
213
213
 
214
214
  buffer = ""
215
215
  enter_buffer = ""
216
+
217
+ input_tokens = 0
218
+ output_tokens = 0
219
+
216
220
  async for chunk in response.aiter_text():
217
221
  buffer += chunk
218
222
  while "\n" in buffer:
@@ -221,12 +225,32 @@ async def fetch_gpt_response_stream(client, url, headers, payload, timeout):
221
225
  if line.startswith(": keepalive"):
222
226
  yield line + end_of_line
223
227
  continue
224
- if line and not line.startswith(":") and (result:=line.lstrip("data: ").strip()):
228
+ if line and not line.startswith(":") and (result:=line.lstrip("data: ").strip()) and not line.startswith("event: "):
225
229
  if result.strip() == "[DONE]":
226
230
  break
227
231
  line = await asyncio.to_thread(json.loads, result)
228
232
  line['id'] = f"chatcmpl-{random_str}"
229
233
 
234
+ # v1/responses
235
+ if line.get("type") == "response.reasoning_summary_text.delta" and line.get("delta"):
236
+ sse_string = await generate_sse_response(timestamp, payload["model"], reasoning_content=line.get("delta"))
237
+ yield sse_string
238
+ continue
239
+ elif line.get("type") == "response.output_text.delta" and line.get("delta"):
240
+ sse_string = await generate_sse_response(timestamp, payload["model"], content=line.get("delta"))
241
+ yield sse_string
242
+ continue
243
+ elif line.get("type") == "response.output_text.done":
244
+ sse_string = await generate_sse_response(timestamp, payload["model"], stop="stop")
245
+ yield sse_string
246
+ continue
247
+ elif line.get("type") == "response.completed":
248
+ input_tokens = safe_get(line, "response", "usage", "input_tokens", default=0)
249
+ output_tokens = safe_get(line, "response", "usage", "output_tokens", default=0)
250
+ continue
251
+ elif line.get("type", "").startswith("response."):
252
+ continue
253
+
230
254
  # 处理 <think> 标签
231
255
  content = safe_get(line, "choices", 0, "delta", "content", default="")
232
256
  if "<think>" in content:
@@ -322,6 +346,11 @@ async def fetch_gpt_response_stream(client, url, headers, payload, timeout):
322
346
  del line["choices"][0]["message"]
323
347
  json_line = await asyncio.to_thread(json.dumps, line)
324
348
  yield "data: " + json_line.strip() + end_of_line
349
+
350
+ if input_tokens and output_tokens:
351
+ sse_string = await generate_sse_response(timestamp, payload["model"], None, None, None, None, None, total_tokens=input_tokens + output_tokens, prompt_tokens=input_tokens, completion_tokens=output_tokens)
352
+ yield sse_string
353
+
325
354
  yield "data: [DONE]" + end_of_line
326
355
 
327
356
  async def fetch_azure_response_stream(client, url, headers, payload, timeout):
aient/core/utils.py CHANGED
@@ -50,7 +50,11 @@ class BaseAPI:
50
50
  self.v1_models: str = urlunparse(parsed_url[:2] + ("v1/models",) + ("",) * 3)
51
51
  else:
52
52
  self.v1_models: str = urlunparse(parsed_url[:2] + (before_v1 + "models",) + ("",) * 3)
53
- self.chat_url: str = urlunparse(parsed_url[:2] + (before_v1 + "chat/completions",) + ("",) * 3)
53
+
54
+ if "v1/responses" in parsed_url.path:
55
+ self.chat_url: str = urlunparse(parsed_url[:2] + ("v1/responses",) + ("",) * 3)
56
+ else:
57
+ self.chat_url: str = urlunparse(parsed_url[:2] + (before_v1 + "chat/completions",) + ("",) * 3)
54
58
  self.image_url: str = urlunparse(parsed_url[:2] + (before_v1 + "images/generations",) + ("",) * 3)
55
59
  if parsed_url.hostname == "dashscope.aliyuncs.com":
56
60
  self.audio_transcriptions: str = urlunparse(parsed_url[:2] + ("/api/v1/services/aigc/multimodal-generation/generation",) + ("",) * 3)
aient/models/chatgpt.py CHANGED
@@ -12,7 +12,7 @@ from typing import Union, Optional, Callable
12
12
  from .base import BaseLLM
13
13
  from ..plugins.registry import registry
14
14
  from ..plugins import PLUGINS, get_tools_result_async, function_call_list, update_tools_config
15
- from ..utils.scripts import safe_get, async_generator_to_sync, parse_function_xml, parse_continuous_json, convert_functions_to_xml, remove_xml_tags_and_content, find_most_frequent_phrase
15
+ from ..utils.scripts import safe_get, async_generator_to_sync, parse_function_xml, parse_continuous_json, convert_functions_to_xml, remove_xml_tags_and_content
16
16
  from ..core.request import prepare_request_payload
17
17
  from ..core.response import fetch_response_stream, fetch_response
18
18
  from ..architext.architext import Messages, SystemMessage, UserMessage, AssistantMessage, ToolCalls, ToolResults, Texts, RoleMessage, Images, Files
@@ -80,7 +80,7 @@ class TaskComplete(Exception):
80
80
  self.completion_message = message
81
81
  super().__init__(f"Task completed with message: {message}")
82
82
 
83
-
83
+ # 结尾重复响应错误
84
84
  class RepetitiveResponseError(Exception):
85
85
  """Custom exception for detecting repetitive and meaningless generated strings."""
86
86
  def __init__(self, message, phrase, count):
@@ -446,13 +446,6 @@ class chatgpt(BaseLLM):
446
446
 
447
447
  if not full_response.strip() and not need_function_call:
448
448
  raise EmptyResponseError("Response is empty")
449
- most_frequent_phrase, most_frequent_phrase_count = find_most_frequent_phrase(full_response)
450
- if most_frequent_phrase_count > 100:
451
- raise RepetitiveResponseError(
452
- f"Detected repetitive and meaningless content. The phrase '{most_frequent_phrase}' appeared {most_frequent_phrase_count} times.",
453
- most_frequent_phrase,
454
- most_frequent_phrase_count
455
- )
456
449
 
457
450
  if self.print_log:
458
451
  self.logger.info(f"total_tokens: {total_tokens}")
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: aient
3
- Version: 1.2.42
3
+ Version: 1.2.44
4
4
  Summary: Aient: The Awakening of Agent.
5
5
  Requires-Python: >=3.11
6
6
  Description-Content-Type: text/markdown
@@ -7,9 +7,9 @@ aient/architext/test/test_save_load.py,sha256=o8DqH6gDYZkFkQy-a7blqLtJTRj5e4a-Li
7
7
  aient/core/__init__.py,sha256=NxjebTlku35S4Dzr16rdSqSTWUvvwEeACe8KvHJnjPg,34
8
8
  aient/core/log_config.py,sha256=kz2_yJv1p-o3lUQOwA3qh-LSc3wMHv13iCQclw44W9c,274
9
9
  aient/core/models.py,sha256=KMlCRLjtq1wQHZTJGqnbWhPS2cHq6eLdnk7peKDrzR8,7490
10
- aient/core/request.py,sha256=w3HcsS4BOcrprPjSUWPz-sfcEnX26HxN7AZCThX2gE0,76949
11
- aient/core/response.py,sha256=oKAb97XX4tbgLBdzSGTedJamGTQztp7hjL5YK3ZbJFQ,36792
12
- aient/core/utils.py,sha256=Z8vTH9w3uS8uubBa65c_aJ11A3OKGYEzm4q0brNZDSk,31594
10
+ aient/core/request.py,sha256=u9wkesp0JMQoJdLoDCNQQgiAB7a_W4Hs38uX6Ppqpi8,77836
11
+ aient/core/response.py,sha256=VYpXfF6RO3Y-fTZMGV2p-bcrd73BPAKlz33gQkOcqjE,38462
12
+ aient/core/utils.py,sha256=9T6Ze9sMnsX4NBWeYCgY3AlZdhh6HFV1LI5SojzZars,31751
13
13
  aient/core/test/test_base_api.py,sha256=pWnycRJbuPSXKKU9AQjWrMAX1wiLC_014Qc9hh5C2Pw,524
14
14
  aient/core/test/test_geminimask.py,sha256=HFX8jDbNg_FjjgPNxfYaR-0-roUrOO-ND-FVsuxSoiw,13254
15
15
  aient/core/test/test_image.py,sha256=_T4peNGdXKBHHxyQNx12u-NTyFE8TlYI6NvvagsG2LE,319
@@ -17,7 +17,7 @@ aient/core/test/test_payload.py,sha256=8jBiJY1uidm1jzL-EiK0s6UGmW9XkdsuuKFGrwFhF
17
17
  aient/models/__init__.py,sha256=ZTiZgbfBPTjIPSKURE7t6hlFBVLRS9lluGbmqc1WjxQ,43
18
18
  aient/models/audio.py,sha256=FNW4lxG1IhxOU7L8mvcbaeC1nXk_lpUZQlg9ijQ0h_Q,1937
19
19
  aient/models/base.py,sha256=HWIGfa2A7OTccvHK0wG1-UlHB-yaWRC7hbi4oR1Mu1Y,7228
20
- aient/models/chatgpt.py,sha256=3AA_jXB_efB86VzbtWw7CyTN4HPaAF7_79XoA9EzHlw,44244
20
+ aient/models/chatgpt.py,sha256=d1ZE12AQriIl8DF6OQ3612_ieP5cbGdBhngYVUdIhKs,43814
21
21
  aient/plugins/__init__.py,sha256=p3KO6Aa3Lupos4i2SjzLQw1hzQTigOAfEHngsldrsyk,986
22
22
  aient/plugins/arXiv.py,sha256=yHjb6PS3GUWazpOYRMKMzghKJlxnZ5TX8z9F6UtUVow,1461
23
23
  aient/plugins/config.py,sha256=TGgZ5SnNKZ8MmdznrZ-TEq7s2ulhAAwTSKH89bci3dA,7079
@@ -33,8 +33,8 @@ aient/plugins/websearch.py,sha256=aPsBjUQ3zQ4gzNrbVq7BMh28ENj9h_fSAeJFF2h9TNk,15
33
33
  aient/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
34
34
  aient/utils/prompt.py,sha256=ZvGAt_ImJ_CGbDnWgpsWskfSV5fCkpFKRpNQjYL7M7s,11100
35
35
  aient/utils/scripts.py,sha256=D_-BCLHV_PS9r6SLXsdEAyey4bVWte-jMMJJKSx0Pcg,42530
36
- aient-1.2.42.dist-info/licenses/LICENSE,sha256=XNdbcWldt0yaNXXWB_Bakoqnxb3OVhUft4MgMA_71ds,1051
37
- aient-1.2.42.dist-info/METADATA,sha256=-26zs7nKpZR08gqULPGguhPBfOut0xUWRKDiLyljpoU,4842
38
- aient-1.2.42.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
39
- aient-1.2.42.dist-info/top_level.txt,sha256=3oXzrP5sAVvyyqabpeq8A2_vfMtY554r4bVE-OHBrZk,6
40
- aient-1.2.42.dist-info/RECORD,,
36
+ aient-1.2.44.dist-info/licenses/LICENSE,sha256=XNdbcWldt0yaNXXWB_Bakoqnxb3OVhUft4MgMA_71ds,1051
37
+ aient-1.2.44.dist-info/METADATA,sha256=9SfGadkd-1zprQklGzHiYmc3FOirTKjalIMbVSGv9j8,4842
38
+ aient-1.2.44.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
39
+ aient-1.2.44.dist-info/top_level.txt,sha256=3oXzrP5sAVvyyqabpeq8A2_vfMtY554r4bVE-OHBrZk,6
40
+ aient-1.2.44.dist-info/RECORD,,
File without changes