aient-1.1.12-py3-none-any.whl → aient-1.1.14-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
aient/core/request.py CHANGED
@@ -48,6 +48,7 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
48
48
 
49
49
  messages = []
50
50
  systemInstruction = None
51
+ system_prompt = ""
51
52
  function_arguments = None
52
53
  for msg in request.messages:
53
54
  if msg.role == "assistant":
@@ -102,7 +103,8 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
102
103
  messages.append({"role": msg.role, "parts": content})
103
104
  elif msg.role == "system":
104
105
  content[0]["text"] = re.sub(r"_+", "_", content[0]["text"])
105
- systemInstruction = {"parts": content}
106
+ system_prompt = system_prompt + "\n\n" + content[0]["text"]
107
+ systemInstruction = {"parts": [{"text": system_prompt}]}
106
108
 
107
109
  if any(off_model in original_model for off_model in gemini_max_token_65k_models):
108
110
  safety_settings = "OFF"
@@ -212,23 +214,35 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
212
214
  else:
213
215
  payload["generationConfig"]["maxOutputTokens"] = 8192
214
216
 
215
- # 从请求模型名中检测思考预算设置
216
- m = re.match(r".*-think-(-?\d+)", request.model)
217
- if m:
218
- try:
219
- val = int(m.group(1))
220
- if val < 0:
221
- val = 0
222
- elif val > 24576:
223
- val = 24576
224
- payload["generationConfig"]["thinkingConfig"] = {"thinkingBudget": val}
225
- except ValueError:
226
- # 如果转换为整数失败,忽略思考预算设置
227
- pass
228
-
229
- # 检测search标签
230
- if request.model.endswith("-search"):
231
- payload["tools"] = [{"googleSearch": {}}]
217
+ if "gemini-2.5" in original_model:
218
+ payload["generationConfig"]["thinkingConfig"] = {
219
+ "includeThoughts": True,
220
+ }
221
+ # 从请求模型名中检测思考预算设置
222
+ m = re.match(r".*-think-(-?\d+)", request.model)
223
+ if m:
224
+ try:
225
+ val = int(m.group(1))
226
+ if val < 0:
227
+ val = 0
228
+ elif val > 24576:
229
+ val = 24576
230
+ payload["generationConfig"]["thinkingConfig"]["thinkingBudget"] = val
231
+ except ValueError:
232
+ # 如果转换为整数失败,忽略思考预算设置
233
+ pass
234
+
235
+ # # 检测search标签
236
+ # if request.model.endswith("-search"):
237
+ # payload["tools"] = [{"googleSearch": {}}]
238
+
239
+ if safe_get(provider, "preferences", "post_body_parameter_overrides", default=None):
240
+ for key, value in safe_get(provider, "preferences", "post_body_parameter_overrides", default={}).items():
241
+ if key == request.model:
242
+ for k, v in value.items():
243
+ payload[k] = v
244
+ elif all(_model not in request.model.lower() for _model in ["gemini", "gpt", "claude"]):
245
+ payload[key] = value
232
246
 
233
247
  return url, headers, payload
234
248
 
@@ -303,16 +317,16 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
303
317
  gemini_stream = "generateContent"
304
318
  model_dict = get_model_dict(provider)
305
319
  original_model = model_dict[request.model]
306
- search_tool = None
320
+ # search_tool = None
307
321
 
308
322
  # https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini/2-0-flash?hl=zh-cn
309
323
  pro_models = ["gemini-2.5", "gemini-2.0"]
310
324
  if any(pro_model in original_model for pro_model in pro_models):
311
325
  location = gemini2
312
- search_tool = {"googleSearch": {}}
326
+ # search_tool = {"googleSearch": {}}
313
327
  else:
314
328
  location = gemini1
315
- search_tool = {"googleSearchRetrieval": {}}
329
+ # search_tool = {"googleSearchRetrieval": {}}
316
330
 
317
331
  if "google-vertex-ai" in provider.get("base_url", ""):
318
332
  url = provider.get("base_url").rstrip('/') + "/v1/projects/{PROJECT_ID}/locations/{LOCATION}/publishers/google/models/{MODEL_ID}:{stream}".format(
@@ -334,6 +348,7 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
334
348
 
335
349
  messages = []
336
350
  systemInstruction = None
351
+ system_prompt = ""
337
352
  function_arguments = None
338
353
  for msg in request.messages:
339
354
  if msg.role == "assistant":
@@ -387,7 +402,8 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
387
402
  elif msg.role != "system":
388
403
  messages.append({"role": msg.role, "parts": content})
389
404
  elif msg.role == "system":
390
- systemInstruction = {"parts": content}
405
+ system_prompt = system_prompt + "\n\n" + content[0]["text"]
406
+ systemInstruction = {"parts": [{"text": system_prompt}]}
391
407
 
392
408
  if any(off_model in original_model for off_model in gemini_max_token_65k_models):
393
409
  safety_settings = "OFF"
@@ -469,8 +485,34 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
469
485
  else:
470
486
  payload["generationConfig"]["max_output_tokens"] = 8192
471
487
 
472
- if request.model.endswith("-search"):
473
- payload["tools"] = [search_tool]
488
+ if "gemini-2.5" in original_model:
489
+ payload["generationConfig"]["thinkingConfig"] = {
490
+ "includeThoughts": True,
491
+ }
492
+ # 从请求模型名中检测思考预算设置
493
+ m = re.match(r".*-think-(-?\d+)", request.model)
494
+ if m:
495
+ try:
496
+ val = int(m.group(1))
497
+ if val < 0:
498
+ val = 0
499
+ elif val > 24576:
500
+ val = 24576
501
+ payload["generationConfig"]["thinkingConfig"]["thinkingBudget"] = val
502
+ except ValueError:
503
+ # 如果转换为整数失败,忽略思考预算设置
504
+ pass
505
+
506
+ # if request.model.endswith("-search"):
507
+ # payload["tools"] = [search_tool]
508
+
509
+ if safe_get(provider, "preferences", "post_body_parameter_overrides", default=None):
510
+ for key, value in safe_get(provider, "preferences", "post_body_parameter_overrides", default={}).items():
511
+ if key == request.model:
512
+ for k, v in value.items():
513
+ payload[k] = v
514
+ elif all(_model not in request.model.lower() for _model in ["gemini", "gpt", "claude"]):
515
+ payload[key] = value
474
516
 
475
517
  return url, headers, payload
476
518
 
@@ -893,6 +935,9 @@ async def get_gpt_payload(request, engine, provider, api_key=None):
893
935
  headers['Authorization'] = f"Bearer {api_key}"
894
936
 
895
937
  url = provider['base_url']
938
+ if "openrouter.ai" in url:
939
+ headers['HTTP-Referer'] = "https://github.com/yym68686/uni-api"
940
+ headers['X-Title'] = "Uni API"
896
941
 
897
942
  messages = []
898
943
  for msg in request.messages:
@@ -1010,7 +1055,11 @@ async def get_gpt_payload(request, engine, provider, api_key=None):
1010
1055
 
1011
1056
  if safe_get(provider, "preferences", "post_body_parameter_overrides", default=None):
1012
1057
  for key, value in safe_get(provider, "preferences", "post_body_parameter_overrides", default={}).items():
1013
- payload[key] = value
1058
+ if key == request.model:
1059
+ for k, v in value.items():
1060
+ payload[k] = v
1061
+ elif all(_model not in request.model.lower() for _model in ["gemini", "gpt", "claude"]):
1062
+ payload[key] = value
1014
1063
 
1015
1064
  return url, headers, payload
1016
1065
 
@@ -1104,7 +1153,11 @@ async def get_azure_payload(request, engine, provider, api_key=None):
1104
1153
 
1105
1154
  if safe_get(provider, "preferences", "post_body_parameter_overrides", default=None):
1106
1155
  for key, value in safe_get(provider, "preferences", "post_body_parameter_overrides", default={}).items():
1107
- payload[key] = value
1156
+ if key == request.model:
1157
+ for k, v in value.items():
1158
+ payload[k] = v
1159
+ elif all(_model not in request.model.lower() for _model in ["gemini", "gpt", "claude"]):
1160
+ payload[key] = value
1108
1161
 
1109
1162
  return url, headers, payload
1110
1163
 
@@ -1118,6 +1171,9 @@ async def get_openrouter_payload(request, engine, provider, api_key=None):
1118
1171
  headers['Authorization'] = f"Bearer {api_key}"
1119
1172
 
1120
1173
  url = provider['base_url']
1174
+ if "openrouter.ai" in url:
1175
+ headers['HTTP-Referer'] = "https://github.com/yym68686/uni-api"
1176
+ headers['X-Title'] = "Uni API"
1121
1177
 
1122
1178
  messages = []
1123
1179
  for msg in request.messages:
@@ -1165,6 +1221,14 @@ async def get_openrouter_payload(request, engine, provider, api_key=None):
1165
1221
  if field not in miss_fields and value is not None:
1166
1222
  payload[field] = value
1167
1223
 
1224
+ if safe_get(provider, "preferences", "post_body_parameter_overrides", default=None):
1225
+ for key, value in safe_get(provider, "preferences", "post_body_parameter_overrides", default={}).items():
1226
+ if key == request.model:
1227
+ for k, v in value.items():
1228
+ payload[k] = v
1229
+ elif all(_model not in request.model.lower() for _model in ["gemini", "gpt", "claude"]):
1230
+ payload[key] = value
1231
+
1168
1232
  return url, headers, payload
1169
1233
 
1170
1234
  async def get_cohere_payload(request, engine, provider, api_key=None):
@@ -1433,9 +1497,13 @@ async def get_claude_payload(request, engine, provider, api_key=None):
1433
1497
  message_index = message_index + 1
1434
1498
 
1435
1499
  if "claude-3-7-sonnet" in original_model:
1436
- max_tokens = 20000
1500
+ max_tokens = 128000
1437
1501
  elif "claude-3-5-sonnet" in original_model:
1438
1502
  max_tokens = 8192
1503
+ elif "claude-sonnet-4" in original_model:
1504
+ max_tokens = 64000
1505
+ elif "claude-opus-4" in original_model:
1506
+ max_tokens = 32000
1439
1507
  else:
1440
1508
  max_tokens = 4096
1441
1509
 
aient/core/response.py CHANGED
@@ -535,15 +535,25 @@ async def fetch_response(client, url, headers, payload, engine, model):
535
535
  # print("parsed_data", json.dumps(parsed_data, indent=4, ensure_ascii=False))
536
536
  content = ""
537
537
  reasoning_content = ""
538
- for item in parsed_data:
539
- chunk = safe_get(item, "candidates", 0, "content", "parts", 0, "text")
540
- is_think = safe_get(item, "candidates", 0, "content", "parts", 0, "thought", default=False)
538
+ parts_list = safe_get(parsed_data, 0, "candidates", 0, "content", "parts", default=[])
539
+ for item in parts_list:
540
+ chunk = safe_get(item, "text")
541
+ is_think = safe_get(item, "thought", default=False)
541
542
  # logger.info(f"chunk: {repr(chunk)}")
542
543
  if chunk:
543
544
  if is_think:
544
545
  reasoning_content += chunk
545
546
  else:
546
547
  content += chunk
548
+ # for item in parsed_data:
549
+ # chunk = safe_get(item, "candidates", 0, "content", "parts", 0, "text")
550
+ # is_think = safe_get(item, "candidates", 0, "content", "parts", 0, "thought", default=False)
551
+ # # logger.info(f"chunk: {repr(chunk)}")
552
+ # if chunk:
553
+ # if is_think:
554
+ # reasoning_content += chunk
555
+ # else:
556
+ # content += chunk
547
557
 
548
558
  usage_metadata = safe_get(parsed_data, -1, "usageMetadata")
549
559
  prompt_tokens = safe_get(usage_metadata, "promptTokenCount", default=0)
aient/core/utils.py CHANGED
@@ -96,6 +96,7 @@ def get_engine(provider, endpoint=None, original_model=""):
96
96
  and "o3" not in original_model \
97
97
  and "o4" not in original_model \
98
98
  and "gemini" not in original_model \
99
+ and "gemma" not in original_model \
99
100
  and "learnlm" not in original_model \
100
101
  and "grok" not in original_model \
101
102
  and parsed_url.netloc != 'api.cloudflare.com' \
@@ -172,7 +172,8 @@ def excute_command(command):
172
172
  process.stderr.close()
173
173
 
174
174
  new_output_lines = []
175
- output_lines = "".join(output_lines).strip().replace("\\u001b[A", "").replace("\\r", "\r").replace("\\\\", "").replace("\\n", "\n").replace("\r", "+++").replace("\n", "+++")
175
+ output_lines = "".join(output_lines).strip().replace("\\r", "\r").replace("\\\\", "").replace("\\n", "\n").replace("\r", "+++").replace("\n", "+++")
176
+ output_lines = re.sub(r'\\u001b\[[0-9;]*[a-zA-Z]', '', output_lines)
176
177
  for line in output_lines.split("+++"):
177
178
  if line.strip() == "":
178
179
  continue
aient/utils/scripts.py CHANGED
@@ -667,7 +667,6 @@ def convert_functions_to_xml(functions_list):
667
667
 
668
668
  if __name__ == "__main__":
669
669
 
670
- # 运行本文件:python -m beswarm.aient.src.aient.utils.scripts
671
670
  os.system("clear")
672
671
  test_xml = """
673
672
  ✅ 好的,我现在读取 `README.md` 文件。
@@ -742,4 +741,6 @@ if __name__ == "__main__":
742
741
  请提供前两个 `excute_command` 的执行结果。
743
742
  """
744
743
 
745
- print(parse_function_xml(test_xml))
744
+ print(parse_function_xml(test_xml))
745
+
746
+ # 运行本文件:python -m beswarm.aient.src.aient.utils.scripts
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: aient
3
- Version: 1.1.12
3
+ Version: 1.1.14
4
4
  Summary: Aient: The Awakening of Agent.
5
5
  Description-Content-Type: text/markdown
6
6
  License-File: LICENSE
@@ -4,9 +4,9 @@ aient/core/.gitignore,sha256=5JRRlYYsqt_yt6iFvvzhbqh2FTUQMqwo6WwIuFzlGR8,13
4
4
  aient/core/__init__.py,sha256=NxjebTlku35S4Dzr16rdSqSTWUvvwEeACe8KvHJnjPg,34
5
5
  aient/core/log_config.py,sha256=kz2_yJv1p-o3lUQOwA3qh-LSc3wMHv13iCQclw44W9c,274
6
6
  aient/core/models.py,sha256=kF-HLi1I2k_G5r153ZHuiGH8_NmpTlFMfK0_myB28YQ,7366
7
- aient/core/request.py,sha256=VItemXnWzqzS10W-RuLVrARki1w7MZMBZdyqyA5axw8,61943
8
- aient/core/response.py,sha256=BNHLazjfQT8mVg7LnPLzlX429aQM3S03pumPbOpczCI,31518
9
- aient/core/utils.py,sha256=n3dyaApN4rrSduI8cjZbeD0mv8_O5LPTTbwRkj1_v4w,26540
7
+ aient/core/request.py,sha256=9GbzEg7jIH8s-jXeB1gsfoOsDbwg4C6LqXvRxVTnqEs,65263
8
+ aient/core/response.py,sha256=Z0Bjl_QvpUguyky1LIcsVks4BKKqT0eYEpDmKa_cwpQ,31978
9
+ aient/core/utils.py,sha256=-naFCv8V-qhnqvDUd8BNbW1HR9CVAPxISrXoAz464Qg,26580
10
10
  aient/core/test/test_base_api.py,sha256=pWnycRJbuPSXKKU9AQjWrMAX1wiLC_014Qc9hh5C2Pw,524
11
11
  aient/core/test/test_geminimask.py,sha256=HFX8jDbNg_FjjgPNxfYaR-0-roUrOO-ND-FVsuxSoiw,13254
12
12
  aient/core/test/test_image.py,sha256=_T4peNGdXKBHHxyQNx12u-NTyFE8TlYI6NvvagsG2LE,319
@@ -23,7 +23,7 @@ aient/models/vertex.py,sha256=qVD5l1Q538xXUPulxG4nmDjXE1VoV4yuAkTCpIeJVw0,16795
23
23
  aient/plugins/__init__.py,sha256=p3KO6Aa3Lupos4i2SjzLQw1hzQTigOAfEHngsldrsyk,986
24
24
  aient/plugins/arXiv.py,sha256=yHjb6PS3GUWazpOYRMKMzghKJlxnZ5TX8z9F6UtUVow,1461
25
25
  aient/plugins/config.py,sha256=Vp6CG9ocdC_FAlCMEGtKj45xamir76DFxdJVvURNtog,6539
26
- aient/plugins/excute_command.py,sha256=A3WmfZboEikU1EHvtMWhBv-xHxCyMxbDddQ982I_8wE,10482
26
+ aient/plugins/excute_command.py,sha256=huQSbNbeImV8BUIsQKE13BIhCAMr7aYRyXO4saE1dTI,10534
27
27
  aient/plugins/get_time.py,sha256=Ih5XIW5SDAIhrZ9W4Qe5Hs1k4ieKPUc_LAd6ySNyqZk,654
28
28
  aient/plugins/image.py,sha256=ZElCIaZznE06TN9xW3DrSukS7U3A5_cjk1Jge4NzPxw,2072
29
29
  aient/plugins/list_directory.py,sha256=JZVuImecMSfEv6jLqii-0uQJ1UCsrpMNmYlwW3PEDg4,1374
@@ -37,9 +37,9 @@ aient/prompt/__init__.py,sha256=GBtn6-JDT8KHFCcuPpfSNE_aGddg5p4FEyMCy4BfwGs,20
37
37
  aient/prompt/agent.py,sha256=y2GETN6ScC5yQVs75VFfzm4YUWzblbqLYz0Sy6JnPRw,24950
38
38
  aient/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
39
39
  aient/utils/prompt.py,sha256=UcSzKkFE4-h_1b6NofI6xgk3GoleqALRKY8VBaXLjmI,11311
40
- aient/utils/scripts.py,sha256=wutPtgbs-WXo5AACLpnCJaRQBOSKXWNnsf2grbYDzyQ,29098
41
- aient-1.1.12.dist-info/licenses/LICENSE,sha256=XNdbcWldt0yaNXXWB_Bakoqnxb3OVhUft4MgMA_71ds,1051
42
- aient-1.1.12.dist-info/METADATA,sha256=Qw9hW21BsAFZHELCET7zEMTFUvvCaJv0r2WBgJmZM-I,4968
43
- aient-1.1.12.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
44
- aient-1.1.12.dist-info/top_level.txt,sha256=3oXzrP5sAVvyyqabpeq8A2_vfMtY554r4bVE-OHBrZk,6
45
- aient-1.1.12.dist-info/RECORD,,
40
+ aient/utils/scripts.py,sha256=LD8adnfuRrJoY2tWKseXOPJXaxbrUmz4czsnUvHswNY,29096
41
+ aient-1.1.14.dist-info/licenses/LICENSE,sha256=XNdbcWldt0yaNXXWB_Bakoqnxb3OVhUft4MgMA_71ds,1051
42
+ aient-1.1.14.dist-info/METADATA,sha256=Cm2dAAwtu6XxjyHCXOgzNRRRdbQAC2iVeCFfJxLM3Wg,4968
43
+ aient-1.1.14.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
44
+ aient-1.1.14.dist-info/top_level.txt,sha256=3oXzrP5sAVvyyqabpeq8A2_vfMtY554r4bVE-OHBrZk,6
45
+ aient-1.1.14.dist-info/RECORD,,
File without changes