aient-1.1.83-py3-none-any.whl → aient-1.1.85-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
aient/core/response.py CHANGED
@@ -21,13 +21,12 @@ async def check_response(response, error_log):
             return {"error": f"{error_log} HTTP Error", "status_code": response.status_code, "details": error_json}
     return None
 
-async def gemini_json_poccess(response_str):
+async def gemini_json_poccess(response_json):
     promptTokenCount = 0
     candidatesTokenCount = 0
     totalTokenCount = 0
     image_base64 = None
 
-    response_json = await asyncio.to_thread(json.loads, response_str)
     json_data = safe_get(response_json, "candidates", 0, "content", default=None)
     finishReason = safe_get(response_json, "candidates", 0 , "finishReason", default=None)
     if finishReason:
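The rename from response_str to response_json is an interface change, not cosmetic: the helper no longer decodes JSON itself, and the caller now decodes each chunk once, off the event loop, and passes the resulting dict in. A minimal sketch of the offloaded-decode pattern (helper name hypothetical):

    import asyncio
    import json

    async def parse_chunk(raw: str) -> dict:
        # json.loads is CPU-bound; for large chunks, running it in a worker
        # thread keeps the event loop free to service other streams.
        return await asyncio.to_thread(json.loads, raw)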
@@ -48,7 +47,7 @@ async def gemini_json_poccess(response_str):
 
     function_call_name = safe_get(json_data, "functionCall", "name", default=None)
     function_full_response = safe_get(json_data, "functionCall", "args", default="")
-    function_full_response = json.dumps(function_full_response) if function_full_response else None
+    function_full_response = await asyncio.to_thread(json.dumps, function_full_response) if function_full_response else None
 
     blockReason = safe_get(json_data, 0, "promptFeedback", "blockReason", default=None)
 
@@ -56,7 +55,8 @@ async def gemini_json_poccess(response_str):
 
 async def fetch_gemini_response_stream(client, url, headers, payload, model, timeout):
     timestamp = int(datetime.timestamp(datetime.now()))
-    async with client.stream('POST', url, headers=headers, json=payload, timeout=timeout) as response:
+    json_payload = await asyncio.to_thread(json.dumps, payload)
+    async with client.stream('POST', url, headers=headers, content=json_payload, timeout=timeout) as response:
         error_message = await check_response(response, "fetch_gemini_response_stream")
         if error_message:
             yield error_message
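This hunk establishes the pattern repeated in every streaming helper below: rather than httpx's json=payload, which serializes the body synchronously while the request is being built, the payload is serialized in a worker thread via asyncio.to_thread(json.dumps, ...) and sent as pre-built content=. A minimal sketch of the pattern, assuming headers already names the JSON content type (function name hypothetical):

    import asyncio
    import json
    import httpx

    async def stream_post(client: httpx.AsyncClient, url: str, headers: dict, payload: dict):
        # Serialize off the event loop, then hand httpx the ready-made body.
        body = await asyncio.to_thread(json.dumps, payload)
        async with client.stream('POST', url, headers=headers, content=body) as response:
            async for line in response.aiter_lines():
                yield line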
@@ -76,7 +76,7 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model, tim
             if line.startswith("data: "):
                 parts_json = line.lstrip("data: ").strip()
                 try:
-                    await asyncio.to_thread(json.loads, parts_json)
+                    response_json = await asyncio.to_thread(json.loads, parts_json)
                 except json.JSONDecodeError:
                     logger.error(f"JSON decode error: {parts_json}")
                     continue
@@ -84,12 +84,12 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model, tim
                 parts_json += line
                 parts_json = parts_json.lstrip("[,")
                 try:
-                    await asyncio.to_thread(json.loads, parts_json)
+                    response_json = await asyncio.to_thread(json.loads, parts_json)
                 except json.JSONDecodeError:
                     continue
 
             # https://ai.google.dev/api/generate-content?hl=zh-cn#FinishReason
-            is_thinking, reasoning_content, content, image_base64, function_call_name, function_full_response, finishReason, blockReason, promptTokenCount, candidatesTokenCount, totalTokenCount = await gemini_json_poccess(parts_json)
+            is_thinking, reasoning_content, content, image_base64, function_call_name, function_full_response, finishReason, blockReason, promptTokenCount, candidatesTokenCount, totalTokenCount = await gemini_json_poccess(response_json)
 
             if is_thinking:
                 sse_string = await generate_sse_response(timestamp, model, reasoning_content=reasoning_content)
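Together with the previous hunk, this removes a double decode: the old code ran json.loads on parts_json only as validation, discarded the result, and gemini_json_poccess then re-parsed the same string; the validated dict is now reused directly. The parse-once shape, reduced to a sketch (both names hypothetical):

    import asyncio
    import json

    async def decode_and_process(raw: str, process):
        try:
            obj = await asyncio.to_thread(json.loads, raw)
        except json.JSONDecodeError:
            return None            # skip malformed chunks
        return await process(obj)  # no second json.loads inside process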
@@ -125,7 +125,8 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model, tim
 
 async def fetch_vertex_claude_response_stream(client, url, headers, payload, model, timeout):
     timestamp = int(datetime.timestamp(datetime.now()))
-    async with client.stream('POST', url, headers=headers, json=payload, timeout=timeout) as response:
+    json_payload = await asyncio.to_thread(json.dumps, payload)
+    async with client.stream('POST', url, headers=headers, content=json_payload, timeout=timeout) as response:
         error_message = await check_response(response, "fetch_vertex_claude_response_stream")
         if error_message:
             yield error_message
@@ -198,7 +199,8 @@ async def fetch_gpt_response_stream(client, url, headers, payload, timeout):
     is_thinking = False
     has_send_thinking = False
     ark_tag = False
-    async with client.stream('POST', url, headers=headers, json=payload, timeout=timeout) as response:
+    json_payload = await asyncio.to_thread(json.dumps, payload)
+    async with client.stream('POST', url, headers=headers, content=json_payload, timeout=timeout) as response:
         error_message = await check_response(response, "fetch_gpt_response_stream")
         if error_message:
             yield error_message
@@ -312,7 +314,8 @@ async def fetch_azure_response_stream(client, url, headers, payload, timeout):
     is_thinking = False
     has_send_thinking = False
     ark_tag = False
-    async with client.stream('POST', url, headers=headers, json=payload, timeout=timeout) as response:
+    json_payload = await asyncio.to_thread(json.dumps, payload)
+    async with client.stream('POST', url, headers=headers, content=json_payload, timeout=timeout) as response:
         error_message = await check_response(response, "fetch_azure_response_stream")
         if error_message:
             yield error_message
@@ -365,7 +368,8 @@ async def fetch_azure_response_stream(client, url, headers, payload, timeout):
 
 async def fetch_cloudflare_response_stream(client, url, headers, payload, model, timeout):
     timestamp = int(datetime.timestamp(datetime.now()))
-    async with client.stream('POST', url, headers=headers, json=payload, timeout=timeout) as response:
+    json_payload = await asyncio.to_thread(json.dumps, payload)
+    async with client.stream('POST', url, headers=headers, content=json_payload, timeout=timeout) as response:
         error_message = await check_response(response, "fetch_cloudflare_response_stream")
         if error_message:
             yield error_message
@@ -390,7 +394,8 @@ async def fetch_cloudflare_response_stream(client, url, headers, payload, model,
 
 async def fetch_cohere_response_stream(client, url, headers, payload, model, timeout):
     timestamp = int(datetime.timestamp(datetime.now()))
-    async with client.stream('POST', url, headers=headers, json=payload, timeout=timeout) as response:
+    json_payload = await asyncio.to_thread(json.dumps, payload)
+    async with client.stream('POST', url, headers=headers, content=json_payload, timeout=timeout) as response:
         error_message = await check_response(response, "fetch_cohere_response_stream")
         if error_message:
             yield error_message
@@ -413,7 +418,8 @@ async def fetch_cohere_response_stream(client, url, headers, payload, model, tim
 
 async def fetch_claude_response_stream(client, url, headers, payload, model, timeout):
     timestamp = int(datetime.timestamp(datetime.now()))
-    async with client.stream('POST', url, headers=headers, json=payload, timeout=timeout) as response:
+    json_payload = await asyncio.to_thread(json.dumps, payload)
+    async with client.stream('POST', url, headers=headers, content=json_payload, timeout=timeout) as response:
         error_message = await check_response(response, "fetch_claude_response_stream")
         if error_message:
             yield error_message
@@ -466,7 +472,8 @@ async def fetch_claude_response_stream(client, url, headers, payload, model, tim
 
 async def fetch_aws_response_stream(client, url, headers, payload, model, timeout):
     timestamp = int(datetime.timestamp(datetime.now()))
-    async with client.stream('POST', url, headers=headers, json=payload, timeout=timeout) as response:
+    json_payload = await asyncio.to_thread(json.dumps, payload)
+    async with client.stream('POST', url, headers=headers, content=json_payload, timeout=timeout) as response:
         error_message = await check_response(response, "fetch_aws_response_stream")
         if error_message:
             yield error_message
@@ -521,7 +528,8 @@ async def fetch_response(client, url, headers, payload, engine, model, timeout=2
         file = payload.pop("file")
         response = await client.post(url, headers=headers, data=payload, files={"file": file}, timeout=timeout)
     else:
-        response = await client.post(url, headers=headers, json=payload, timeout=timeout)
+        json_payload = await asyncio.to_thread(json.dumps, payload)
+        response = await client.post(url, headers=headers, content=json_payload, timeout=timeout)
     error_message = await check_response(response, "fetch_response")
     if error_message:
         yield error_message
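One caveat applies to every json= to content= switch in this file: httpx adds a Content-Type: application/json header automatically only for json=, while content= sends the body verbatim with no such header, so these changes presumably rely on the headers built upstream already declaring JSON. A small defensive sketch (helper name hypothetical):

    import asyncio
    import json
    import httpx

    async def post_json(client: httpx.AsyncClient, url: str, headers: dict, payload: dict, timeout: float) -> httpx.Response:
        # content= does not set Content-Type, so the caller's headers must.
        assert headers.get("Content-Type", "").startswith("application/json")
        body = await asyncio.to_thread(json.dumps, payload)
        return await client.post(url, headers=headers, content=body, timeout=timeout)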
aient/models/chatgpt.py CHANGED
@@ -236,6 +236,10 @@ class chatgpt(BaseLLM):
                 if type(self.conversation[convo_id][message_index]["content"]) == list \
                 and type(self.conversation[convo_id][message_index + 1]["content"]) == dict:
                     self.conversation[convo_id][message_index + 1]["content"] = [self.conversation[convo_id][message_index + 1]["content"]]
+                if type(self.conversation[convo_id][message_index]["content"]) == str \
+                and type(self.conversation[convo_id][message_index + 1]["content"]) == str \
+                and self.conversation[convo_id][message_index].get("content").endswith(self.conversation[convo_id][message_index + 1].get("content")):
+                    self.conversation[convo_id][message_index + 1]["content"] = ""
                 self.conversation[convo_id][message_index]["content"] += self.conversation[convo_id][message_index + 1]["content"]
                 self.conversation[convo_id].pop(message_index + 1)
                 conversation_len = conversation_len - 1
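The added guard covers merging two adjacent string messages where the second is already a trailing copy of the first, presumably the residue of a replayed or retried response; blanking the duplicate before the += concatenation below keeps the text from doubling. The rule in isolation, as a sketch with hypothetical names:

    def merge_adjacent(prev: str, nxt: str) -> str:
        # If the next message is already the tail of the previous one,
        # treat it as a duplicate rather than appending it again.
        if prev.endswith(nxt):
            nxt = ""
        return prev + nxt

    assert merge_adjacent("hello world", "world") == "hello world"
    assert merge_adjacent("hello", " world") == "hello world"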
@@ -744,7 +748,8 @@ class chatgpt(BaseLLM):
         need_done_prompt = False
 
         # Send the request and handle the response
-        for i in range(10):
+        retry_times = 0
+        while True:
             tmp_post_json = copy.deepcopy(json_post)
             if need_done_prompt:
                 tmp_post_json["messages"].extend(need_done_prompt)
@@ -809,7 +814,7 @@ class chatgpt(BaseLLM):
                     return  # Stop iteration
                 except httpx.RemoteProtocolError:
                     continue
-                except httpx.ReadError:
+                except httpx.ReadError as e:
                     self.logger.warning(f"{e}, retrying...")
                     continue
                 except APITimeoutError:
@@ -843,7 +848,8 @@ class chatgpt(BaseLLM):
                     error_message = "您输入了无效的API URL,请使用正确的URL并使用`/start`命令重新设置API URL。具体错误如下:\n\n" + str(e)
                     raise ConfigurationError(error_message)
                 # The final retry failed; propagate the exception upward
-                if i == 10:
+                retry_times += 1
+                if retry_times == 9:
                     raise RetryFailedError(str(e))
 
     def ask_stream(
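The last three chatgpt.py hunks repair the retry loop. Under for i in range(10), i never exceeds 9, so the old if i == 10 guard was unreachable and RetryFailedError was never raised; the old except httpx.ReadError: also logged {e} without ever binding e. The new shape, reduced to a runnable sketch (RetryFailedError is redefined locally so the sketch runs standalone; the request call and transient error are hypothetical stand-ins, not the method's exact control flow):

    class RetryFailedError(RuntimeError):
        pass

    def do_request():
        raise ConnectionError("boom")   # hypothetical, always-failing request

    retry_times = 0
    while True:
        try:
            do_request()
            break                       # success: leave the loop
        except ConnectionError as e:    # stand-in for httpx's transient errors
            retry_times += 1
            if retry_times == 9:        # give up after nine failed attempts
                raise RetryFailedError(str(e))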
aient-1.1.83.dist-info/METADATA → aient-1.1.85.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: aient
-Version: 1.1.83
+Version: 1.1.85
 Summary: Aient: The Awakening of Agent.
 Requires-Python: >=3.11
 Description-Content-Type: text/markdown
aient-1.1.83.dist-info/RECORD → aient-1.1.85.dist-info/RECORD CHANGED
@@ -3,7 +3,7 @@ aient/core/__init__.py,sha256=NxjebTlku35S4Dzr16rdSqSTWUvvwEeACe8KvHJnjPg,34
 aient/core/log_config.py,sha256=kz2_yJv1p-o3lUQOwA3qh-LSc3wMHv13iCQclw44W9c,274
 aient/core/models.py,sha256=KMlCRLjtq1wQHZTJGqnbWhPS2cHq6eLdnk7peKDrzR8,7490
 aient/core/request.py,sha256=QnDhyrjzcJOEQU2oauMQi_HHMRR5NxdkrX7nn5JMwTc,76675
-aient/core/response.py,sha256=ye6Ie5HevXVcH3X5V5BoOC5yDJMBKTKopWQzsCNs008,34977
+aient/core/response.py,sha256=waC-i5i04bwooqNAgjmX30nx0IJ1nLITPeF0RCYwRs0,35620
 aient/core/utils.py,sha256=okDFj8S4r71vnkEBYxiOqoKee63UzNyk2y_p4uhWBvY,28848
 aient/core/test/test_base_api.py,sha256=pWnycRJbuPSXKKU9AQjWrMAX1wiLC_014Qc9hh5C2Pw,524
 aient/core/test/test_geminimask.py,sha256=HFX8jDbNg_FjjgPNxfYaR-0-roUrOO-ND-FVsuxSoiw,13254
@@ -12,7 +12,7 @@ aient/core/test/test_payload.py,sha256=8jBiJY1uidm1jzL-EiK0s6UGmW9XkdsuuKFGrwFhF
 aient/models/__init__.py,sha256=ZTiZgbfBPTjIPSKURE7t6hlFBVLRS9lluGbmqc1WjxQ,43
 aient/models/audio.py,sha256=kRd-8-WXzv4vwvsTGwnstK-WR8--vr9CdfCZzu8y9LA,1934
 aient/models/base.py,sha256=-nnihYnx-vHZMqeVO9ljjt3k4FcD3n-iMk4tT-10nRQ,7232
-aient/models/chatgpt.py,sha256=4GOuMzmHrywtVIUV6fugzqMkkG_AyS14STPePEKSoeA,45733
+aient/models/chatgpt.py,sha256=Jb0g9FsTitM7U_qG4RZilSgRY18fJ3-1a5GueetjQ8I,46227
 aient/plugins/__init__.py,sha256=p3KO6Aa3Lupos4i2SjzLQw1hzQTigOAfEHngsldrsyk,986
 aient/plugins/arXiv.py,sha256=yHjb6PS3GUWazpOYRMKMzghKJlxnZ5TX8z9F6UtUVow,1461
 aient/plugins/config.py,sha256=TGgZ5SnNKZ8MmdznrZ-TEq7s2ulhAAwTSKH89bci3dA,7079
@@ -30,8 +30,8 @@ aient/plugins/write_file.py,sha256=Jt8fOEwqhYiSWpCbwfAr1xoi_BmFnx3076GMhuL06uI,3
 aient/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 aient/utils/prompt.py,sha256=UcSzKkFE4-h_1b6NofI6xgk3GoleqALRKY8VBaXLjmI,11311
 aient/utils/scripts.py,sha256=VqtK4RFEx7KxkmcqG3lFDS1DxoNlFFGErEjopVcc8IE,40974
-aient-1.1.83.dist-info/licenses/LICENSE,sha256=XNdbcWldt0yaNXXWB_Bakoqnxb3OVhUft4MgMA_71ds,1051
-aient-1.1.83.dist-info/METADATA,sha256=djB0cSoRmV_HSI4Fzp2FUyZJ4d0Ob1JnJyr6K_mQGDU,4842
-aient-1.1.83.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-aient-1.1.83.dist-info/top_level.txt,sha256=3oXzrP5sAVvyyqabpeq8A2_vfMtY554r4bVE-OHBrZk,6
-aient-1.1.83.dist-info/RECORD,,
+aient-1.1.85.dist-info/licenses/LICENSE,sha256=XNdbcWldt0yaNXXWB_Bakoqnxb3OVhUft4MgMA_71ds,1051
+aient-1.1.85.dist-info/METADATA,sha256=HxXc6ppmpfa3xS-UYSmYsvoB9xennfmJ8SlJns5X5_U,4842
+aient-1.1.85.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+aient-1.1.85.dist-info/top_level.txt,sha256=3oXzrP5sAVvyyqabpeq8A2_vfMtY554r4bVE-OHBrZk,6
+aient-1.1.85.dist-info/RECORD,,