aient 1.0.68__py3-none-any.whl → 1.0.70__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
aient/core/.gitignore ADDED
@@ -0,0 +1 @@
+ /__pycache__
aient/core/request.py CHANGED
@@ -21,21 +21,24 @@ from .utils import (
  )
 
  async def get_gemini_payload(request, engine, provider, api_key=None):
+ import re
+
  headers = {
  'Content-Type': 'application/json'
  }
+
+ # Get the actual model ID after mapping
  model_dict = get_model_dict(provider)
  original_model = model_dict[request.model]
+
  gemini_stream = "streamGenerateContent"
  url = provider['base_url']
  parsed_url = urllib.parse.urlparse(url)
- # print("parsed_url", parsed_url)
  if "/v1beta" in parsed_url.path:
  api_version = "v1beta"
  else:
  api_version = "v1"
 
- # https://generativelanguage.googleapis.com/v1beta/models/
  url = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path.split('/models')[0].rstrip('/')}/models/{original_model}:{gemini_stream}?key={api_key}"
 
  messages = []
@@ -96,7 +99,7 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
  content[0]["text"] = re.sub(r"_+", "_", content[0]["text"])
  systemInstruction = {"parts": content}
 
- off_models = ["gemini-2.0-flash", "gemini-1.5", "gemini-2.5-pro"]
+ off_models = ["gemini-2.0-flash", "gemini-2.5-flash", "gemini-1.5", "gemini-2.5-pro"]
  if any(off_model in original_model for off_model in off_models):
  safety_settings = "OFF"
  else:
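
Adding "gemini-2.5-flash" to off_models above means any mapped model ID containing that substring (preview variants included) has safety_settings set to "OFF". A small sketch of that substring test, not part of the diff, using example model IDs only:

    off_models = ["gemini-2.0-flash", "gemini-2.5-flash", "gemini-1.5", "gemini-2.5-pro"]

    # Same membership test as the diff: substring match against the mapped model ID.
    def safety_off(original_model: str) -> bool:
        return any(off_model in original_model for off_model in off_models)

    print(safety_off("gemini-2.5-flash-preview"))  # True: contains "gemini-2.5-flash"
    print(safety_off("gemini-2.0-flash-lite"))     # True
    print(safety_off("gemma-3-27b-it"))            # False
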
@@ -196,7 +199,7 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
  else:
  payload[field] = value
 
- max_token_65k_models = ["gemini-2.5-pro", "gemini-2.0-pro", "gemini-2.0-flash-thinking"]
+ max_token_65k_models = ["gemini-2.5-pro", "gemini-2.0-pro", "gemini-2.0-flash-thinking", "gemini-2.5-flash"]
  payload["generationConfig"] = generation_config
  if "maxOutputTokens" not in generation_config:
  if any(pro_model in original_model for pro_model in max_token_65k_models):
@@ -204,15 +207,26 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
  else:
  payload["generationConfig"]["maxOutputTokens"] = 8192
 
+ # Detect the thinking-budget setting from the requested model name
+ m = re.match(r".*-think-(-?\d+)", request.model)
+ if m:
+ try:
+ val = int(m.group(1))
+ if val < 0:
+ val = 0
+ elif val > 24576:
+ val = 24576
+ payload["generationConfig"]["thinkingConfig"] = {"thinkingBudget": val}
+ except ValueError:
+ # If the conversion to an integer fails, ignore the thinking-budget setting
+ pass
+
+ # Detect the search tag
  if request.model.endswith("-search"):
  if "tools" not in payload:
- payload["tools"] = [{
- "googleSearch": {}
- }]
+ payload["tools"] = [{"googleSearch": {}}]
  else:
- payload["tools"].append({
- "googleSearch": {}
- })
+ payload["tools"].append({"googleSearch": {}})
 
  return url, headers, payload
 
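
The "-think-N" suffix handling added in this hunk clamps the requested thinking budget into the range Gemini accepts before writing it into generationConfig. A minimal standalone sketch of that mapping, not part of the diff, with a hypothetical helper name (parse_thinking_budget is not part of the package):

    import re

    # Mirrors the suffix parsing above: "<model>-think-<N>" -> budget clamped to [0, 24576].
    def parse_thinking_budget(model_name: str):
        m = re.match(r".*-think-(-?\d+)", model_name)
        if not m:
            return None
        return max(0, min(int(m.group(1)), 24576))

    assert parse_thinking_budget("gemini-2.5-flash-think-1024") == 1024
    assert parse_thinking_budget("gemini-2.5-flash-think--1") == 0         # negative -> 0
    assert parse_thinking_budget("gemini-2.5-flash-think-99999") == 24576  # capped
    assert parse_thinking_budget("gemini-2.5-flash") is None
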
@@ -917,7 +931,7 @@ async def get_gpt_payload(request, engine, provider, api_key=None):
 
  for field, value in request.model_dump(exclude_unset=True).items():
  if field not in miss_fields and value is not None:
- if field == "max_tokens" and ("o1" in original_model or "o3" in original_model):
+ if field == "max_tokens" and ("o1" in original_model or "o3" in original_model or "o4" in original_model):
  payload["max_completion_tokens"] = value
  else:
  payload[field] = value
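
The widened condition above routes max_tokens to max_completion_tokens for o-series models (now including o4), since those endpoints expect the newer field. An illustrative standalone sketch of that field mapping, not part of the diff (the helper and dict payload here are not the package's own API):

    # Illustrative only: mirrors the field translation in get_gpt_payload.
    def map_token_field(original_model: str, field: str, value, payload: dict) -> None:
        o_series = any(tag in original_model for tag in ("o1", "o3", "o4"))
        if field == "max_tokens" and o_series:
            payload["max_completion_tokens"] = value  # o-series replacement for max_tokens
        else:
            payload[field] = value

    payload = {}
    map_token_field("o4-mini", "max_tokens", 4096, payload)
    print(payload)  # {'max_completion_tokens': 4096}
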
@@ -925,29 +939,28 @@ async def get_gpt_payload(request, engine, provider, api_key=None):
  if provider.get("tools") == False or "o1-mini" in original_model or "chatgpt-4o-latest" in original_model or "grok" in original_model:
  payload.pop("tools", None)
  payload.pop("tool_choice", None)
+
  if "models.inference.ai.azure.com" in url:
  payload["stream"] = False
- # request.stream = False
  payload.pop("stream_options", None)
 
  if "api.x.ai" in url:
  payload.pop("stream_options", None)
 
- if "o3-mini" in original_model:
+ if "grok-3-mini" in original_model:
  if request.model.endswith("high"):
  payload["reasoning_effort"] = "high"
  elif request.model.endswith("low"):
  payload["reasoning_effort"] = "low"
- else:
- payload["reasoning_effort"] = "medium"
 
- if "grok-3-mini" in original_model:
+ if "o1" in original_model or "o3" in original_model or "o4" in original_model:
  if request.model.endswith("high"):
  payload["reasoning_effort"] = "high"
  elif request.model.endswith("low"):
  payload["reasoning_effort"] = "low"
+ else:
+ payload["reasoning_effort"] = "medium"
 
- if "o3-mini" in original_model or "o1" in original_model:
  if "temperature" in payload:
  payload.pop("temperature")
 
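
With the branches rearranged above, grok-3-mini only sends reasoning_effort when the requested model name ends in "high" or "low", while o1/o3/o4 models now fall back to "medium". A small standalone sketch of that suffix logic, not part of the diff (hypothetical helper, not the package API):

    # Suffix-to-reasoning_effort mapping implied by the diff for grok-3-mini and o-series models.
    def reasoning_effort_for(request_model: str, original_model: str):
        if request_model.endswith("high"):
            return "high"
        if request_model.endswith("low"):
            return "low"
        is_o_series = any(tag in original_model for tag in ("o1", "o3", "o4"))
        return "medium" if is_o_series else None  # grok-3-mini sends nothing by default

    print(reasoning_effort_for("o4-mini", "o4-mini"))               # medium
    print(reasoning_effort_for("grok-3-mini-high", "grok-3-mini"))  # high
    print(reasoning_effort_for("grok-3-mini", "grok-3-mini"))       # None
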
aient/core/response.py CHANGED
@@ -456,42 +456,46 @@ async def fetch_aws_response_stream(client, url, headers, payload, model):
  yield error_message
  return
 
+ buffer = ""
  async for line in response.aiter_text():
- if not line or \
- line.strip() == "" or\
- line.strip().startswith(':content-type') or \
- line.strip().startswith(':event-type'): # Filter out completely empty or whitespace-only lines
- continue
-
- json_match = re.search(r'event{.*?}', line)
- if not json_match:
- continue
- try:
- chunk_data = json.loads(json_match.group(0).lstrip('event'))
- except json.JSONDecodeError:
- logger.error(f"DEBUG json.JSONDecodeError: {json_match.group(0).lstrip('event')!r}")
- continue
-
- # --- Subsequent processing logic unchanged ---
- if "bytes" in chunk_data:
- # Decode the Base64-encoded bytes
- decoded_bytes = base64.b64decode(chunk_data["bytes"])
- # Parse the decoded bytes as JSON again
- payload_chunk = json.loads(decoded_bytes.decode('utf-8'))
- # print(f"DEBUG payload_chunk: {payload_chunk!r}")
-
- text = safe_get(payload_chunk, "delta", "text", default="")
- if text:
- sse_string = await generate_sse_response(timestamp, model, text, None, None)
- yield sse_string
+ buffer += line
+ while "\r" in buffer:
+ line, buffer = buffer.split("\r", 1)
+ if not line or \
+ line.strip() == "" or \
+ line.strip().startswith(':content-type') or \
+ line.strip().startswith(':event-type'): # Filter out completely empty or whitespace-only lines
+ continue
+
+ json_match = re.search(r'event{.*?}', line)
+ if not json_match:
+ continue
+ try:
+ chunk_data = json.loads(json_match.group(0).lstrip('event'))
+ except json.JSONDecodeError:
+ logger.error(f"DEBUG json.JSONDecodeError: {json_match.group(0).lstrip('event')!r}")
+ continue
+
+ # --- Subsequent processing logic unchanged ---
+ if "bytes" in chunk_data:
+ # Decode the Base64-encoded bytes
+ decoded_bytes = base64.b64decode(chunk_data["bytes"])
+ # Parse the decoded bytes as JSON again
+ payload_chunk = json.loads(decoded_bytes.decode('utf-8'))
+ # print(f"DEBUG payload_chunk: {payload_chunk!r}")
+
+ text = safe_get(payload_chunk, "delta", "text", default="")
+ if text:
+ sse_string = await generate_sse_response(timestamp, model, text, None, None)
+ yield sse_string
 
- usage = safe_get(payload_chunk, "amazon-bedrock-invocationMetrics", default="")
- if usage:
- input_tokens = usage.get("inputTokenCount", 0)
- output_tokens = usage.get("outputTokenCount", 0)
- total_tokens = input_tokens + output_tokens
- sse_string = await generate_sse_response(timestamp, model, None, None, None, None, None, total_tokens, input_tokens, output_tokens)
- yield sse_string
+ usage = safe_get(payload_chunk, "amazon-bedrock-invocationMetrics", default="")
+ if usage:
+ input_tokens = usage.get("inputTokenCount", 0)
+ output_tokens = usage.get("outputTokenCount", 0)
+ total_tokens = input_tokens + output_tokens
+ sse_string = await generate_sse_response(timestamp, model, None, None, None, None, None, total_tokens, input_tokens, output_tokens)
+ yield sse_string
 
  yield "data: [DONE]" + end_of_line
 
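
The rewritten loop above accumulates chunks in a buffer and only parses a frame once a full "\r"-terminated line has arrived, so an AWS event-stream record split across reads is no longer dropped. A minimal sketch of that buffering pattern over simulated chunks, not part of the diff (no network or Bedrock dependencies; the real handler goes on to base64-decode chunk_data["bytes"] and parse it as JSON):

    import json
    import re

    # One event frame split across two reads, plus a metadata line to filter out.
    chunks = ['event{"bytes": "SGV', 'sbG8="}\r', ':content-type application/json\r']

    buffer = ""
    events = []
    for chunk in chunks:
        buffer += chunk
        while "\r" in buffer:  # parse only complete lines
            line, buffer = buffer.split("\r", 1)
            if not line.strip() or line.strip().startswith((':content-type', ':event-type')):
                continue
            match = re.search(r'event{.*?}', line)
            if match:
                events.append(json.loads(match.group(0).lstrip('event')))

    print(events)  # [{'bytes': 'SGVsbG8='}]
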
@@ -538,7 +542,7 @@ async def fetch_response(client, url, headers, payload, engine, model):
  if role == "model":
  role = "assistant"
  else:
- logger.error(f"Unknown role: {role}")
+ logger.error(f"Unknown role: {role}, parsed_data: {parsed_data}")
  role = "assistant"
 
  function_call_name = safe_get(parsed_data, -1, "candidates", 0, "content", "parts", 0, "functionCall", "name", default=None)
aient/core/utils.py CHANGED
@@ -91,6 +91,7 @@ def get_engine(provider, endpoint=None, original_model=""):
  and "deepseek" not in original_model \
  and "o1" not in original_model \
  and "o3" not in original_model \
+ and "o4" not in original_model \
  and "gemini" not in original_model \
  and "learnlm" not in original_model \
  and "grok" not in original_model \
aient/plugins/excute_command.py CHANGED
@@ -7,7 +7,15 @@ def excute_command(command):
  """
  Execute a command and return the output
  Must not be used to view PDFs; using the pdftotext command is forbidden
- Make sure the generated command string can be executed directly in a terminal. Special characters (such as &&) must be kept as-is; do not HTML-encode or escape them in any way, for example using &amp;&amp; in place of && is forbidden.
+ Make sure the generated command string can be executed directly in a terminal. Special characters (such as &&) must be kept as-is; do not HTML-encode or escape them in any way; &amp;&amp; is forbidden
+
+ for example:
+
+ correct:
+ ls -l && echo 'Hello, World!'
+
+ incorrect:
+ ls -l &amp;&amp; echo 'Hello, World!'
 
  Parameters:
  command: the command to execute; it can clone repositories, install dependencies, run code, etc.
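
The added docstring examples warn the model not to emit &amp;&amp; in place of &&, because an HTML-encoded operator reaches the shell literally and breaks the command. A hypothetical defensive normalization a caller could apply before execution, not part of the diff or the package (html.unescape is standard library):

    import html
    import subprocess

    def run_command(command: str) -> str:
        # Hypothetical safeguard: undo accidental HTML encoding, e.g. "&amp;&amp;" -> "&&".
        command = html.unescape(command)
        result = subprocess.run(command, shell=True, capture_output=True, text=True)
        return result.stdout

    print(run_command("ls -l &amp;&amp; echo 'Hello, World!'"))
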
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: aient
- Version: 1.0.68
+ Version: 1.0.70
  Summary: Aient: The Awakening of Agent.
  Description-Content-Type: text/markdown
  License-File: LICENSE
@@ -1,11 +1,12 @@
  aient/__init__.py,sha256=SRfF7oDVlOOAi6nGKiJIUK6B_arqYLO9iSMp-2IZZps,21
  aient/core/.git,sha256=lrAcW1SxzRBUcUiuKL5tS9ykDmmTXxyLP3YYU-Y-Q-I,45
+ aient/core/.gitignore,sha256=5JRRlYYsqt_yt6iFvvzhbqh2FTUQMqwo6WwIuFzlGR8,13
  aient/core/__init__.py,sha256=NxjebTlku35S4Dzr16rdSqSTWUvvwEeACe8KvHJnjPg,34
  aient/core/log_config.py,sha256=kz2_yJv1p-o3lUQOwA3qh-LSc3wMHv13iCQclw44W9c,274
  aient/core/models.py,sha256=_1wYZg_n9kb2A3C8xCboyqleH2iHc9scwOvtx9DPeok,7582
- aient/core/request.py,sha256=025Jih6jUhlCnO_JT0si5ygbxB1h-aMUPw6ZhL8mJhk,60362
- aient/core/response.py,sha256=EYlTrpMOInM9IF0uM954xQ6QDca1u33UVcYHfoMomHE,30307
- aient/core/utils.py,sha256=8rZaTu3PA9UyEqeFd-Oxm7VGBO59CzOzcII_UIzmZJY,26035
+ aient/core/request.py,sha256=qE7UFkPNqVQvM0jD9HdcrmpLKJuRYP3rZVqd6aeb8HE,60767
+ aient/core/response.py,sha256=CSAOQmybBu3i2Yq2YUyi91dZWTN1tmtJEBTI8RFwj_s,30594
+ aient/core/utils.py,sha256=R4oKj1oVsdfePdQReaW-ZU4VRTCetvdCKPz_OOTOU_w,26072
  aient/core/test/test_base_api.py,sha256=pWnycRJbuPSXKKU9AQjWrMAX1wiLC_014Qc9hh5C2Pw,524
  aient/core/test/test_image.py,sha256=_T4peNGdXKBHHxyQNx12u-NTyFE8TlYI6NvvagsG2LE,319
  aient/core/test/test_payload.py,sha256=8jBiJY1uidm1jzL-EiK0s6UGmW9XkdsuuKFGrwFhFkw,2755
@@ -21,7 +22,7 @@ aient/models/vertex.py,sha256=qVD5l1Q538xXUPulxG4nmDjXE1VoV4yuAkTCpIeJVw0,16795
  aient/plugins/__init__.py,sha256=p3KO6Aa3Lupos4i2SjzLQw1hzQTigOAfEHngsldrsyk,986
  aient/plugins/arXiv.py,sha256=yHjb6PS3GUWazpOYRMKMzghKJlxnZ5TX8z9F6UtUVow,1461
  aient/plugins/config.py,sha256=tFN54y9pVgK46e9VAOux9h69XuoKJQeTGTAdFKpPPnc,7813
- aient/plugins/excute_command.py,sha256=rFvL91vm_xazNd0mU_oeNQ1N2U1Q80MyZO-A-v2xULs,1124
+ aient/plugins/excute_command.py,sha256=0sQZGLCaRfh0yMM63fXU8XZV7jlo9FKeyEbM-rfgTFg,1209
  aient/plugins/get_time.py,sha256=Ih5XIW5SDAIhrZ9W4Qe5Hs1k4ieKPUc_LAd6ySNyqZk,654
  aient/plugins/image.py,sha256=ZElCIaZznE06TN9xW3DrSukS7U3A5_cjk1Jge4NzPxw,2072
  aient/plugins/list_directory.py,sha256=5ubm-mfrj-tanGSDp4M_Tmb6vQb3dx2-XVfQ2yL2G8A,1394
@@ -35,8 +36,8 @@ aient/prompt/agent.py,sha256=ssZREegzmkWNW20dxzGO92J8Y5v7yjRTR9znTPDjo5Q,23681
  aient/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  aient/utils/prompt.py,sha256=UcSzKkFE4-h_1b6NofI6xgk3GoleqALRKY8VBaXLjmI,11311
  aient/utils/scripts.py,sha256=n0jR5eXCBIK12W4bIx-xU1FVl1hZ4zDC7hq_BWQHYJU,27537
- aient-1.0.68.dist-info/licenses/LICENSE,sha256=XNdbcWldt0yaNXXWB_Bakoqnxb3OVhUft4MgMA_71ds,1051
- aient-1.0.68.dist-info/METADATA,sha256=ybmAd9zAXxJwW78DBy9o9lk4n3ulW52EazBSkt7C7t8,5000
- aient-1.0.68.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
- aient-1.0.68.dist-info/top_level.txt,sha256=3oXzrP5sAVvyyqabpeq8A2_vfMtY554r4bVE-OHBrZk,6
- aient-1.0.68.dist-info/RECORD,,
+ aient-1.0.70.dist-info/licenses/LICENSE,sha256=XNdbcWldt0yaNXXWB_Bakoqnxb3OVhUft4MgMA_71ds,1051
+ aient-1.0.70.dist-info/METADATA,sha256=E8QxcSZFqwXKyDLJUwD8JuTIWk3JupLbVPLaqKgxioU,5000
+ aient-1.0.70.dist-info/WHEEL,sha256=lTU6B6eIfYoiQJTZNc-fyaR6BpL6ehTzU3xGYxn2n8k,91
+ aient-1.0.70.dist-info/top_level.txt,sha256=3oXzrP5sAVvyyqabpeq8A2_vfMtY554r4bVE-OHBrZk,6
+ aient-1.0.70.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (78.1.0)
+ Generator: setuptools (78.1.1)
  Root-Is-Purelib: true
  Tag: py3-none-any