aient 1.1.76__py3-none-any.whl → 1.1.78__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
aient/core/request.py CHANGED
@@ -3,9 +3,9 @@ import json
3
3
  import httpx
4
4
  import base64
5
5
  import asyncio
6
- import urllib.parse
7
6
  from io import IOBase
8
7
  from typing import Tuple
8
+ from urllib.parse import urlparse
9
9
 
10
10
  from .models import RequestModel, Message
11
11
  from .utils import (
@@ -41,7 +41,7 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
41
41
  else:
42
42
  gemini_stream = "generateContent"
43
43
  url = provider['base_url']
44
- parsed_url = urllib.parse.urlparse(url)
44
+ parsed_url = urlparse(url)
45
45
  if "/v1beta" in parsed_url.path:
46
46
  api_version = "v1beta"
47
47
  else:
@@ -1952,21 +1952,40 @@ async def get_embedding_payload(request, engine, provider, api_key=None):
1952
1952
  headers = {
1953
1953
  "Content-Type": "application/json",
1954
1954
  }
1955
- if api_key:
1956
- headers['Authorization'] = f"Bearer {api_key}"
1957
- url = provider['base_url']
1958
- url = BaseAPI(url).embeddings
1959
1955
 
1960
- payload = {
1961
- "input": request.input,
1962
- "model": original_model,
1963
- }
1956
+ url = provider['base_url']
1957
+ parsed_url = urlparse(url)
1958
+ if "embedding-00" in original_model and "127.0.0.1" not in url and \
1959
+ (parsed_url.path.endswith("/v1beta") or \
1960
+ parsed_url.path.endswith("/v1") or \
1961
+ (parsed_url.netloc == 'generativelanguage.googleapis.com' and "openai/chat/completions" not in parsed_url.path)):
1962
+ if api_key:
1963
+ headers['x-goog-api-key'] = f"{api_key}"
1964
+ parsed_url = urlparse(url)
1965
+ url = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path.split('/models')[0].rstrip('/')}/models/{original_model}:embedContent"
1966
+ payload = {
1967
+ "content": {
1968
+ "parts": [
1969
+ {
1970
+ "text": request.input
1971
+ }
1972
+ ]
1973
+ }
1974
+ }
1975
+ else:
1976
+ if api_key:
1977
+ headers['Authorization'] = f"Bearer {api_key}"
1978
+ url = BaseAPI(url).embeddings
1979
+ payload = {
1980
+ "input": request.input,
1981
+ "model": original_model,
1982
+ }
1964
1983
 
1965
- if request.encoding_format:
1966
- if url.startswith("https://api.jina.ai"):
1967
- payload["embedding_type"] = request.encoding_format
1968
- else:
1969
- payload["encoding_format"] = request.encoding_format
1984
+ if request.encoding_format:
1985
+ if url.startswith("https://api.jina.ai"):
1986
+ payload["embedding_type"] = request.encoding_format
1987
+ else:
1988
+ payload["encoding_format"] = request.encoding_format
1970
1989
 
1971
1990
  return url, headers, payload
1972
1991
 
aient/core/response.py CHANGED
@@ -626,6 +626,28 @@ async def fetch_response(client, url, headers, payload, engine, model, timeout=2
626
626
  response_json = await asyncio.to_thread(json.loads, response_bytes)
627
627
  content = safe_get(response_json, "output", "choices", 0, "message", "content", 0, default=None)
628
628
  yield content
629
+
630
+ elif "embedContent" in url:
631
+ response_bytes = await response.aread()
632
+ response_json = await asyncio.to_thread(json.loads, response_bytes)
633
+ content = safe_get(response_json, "embedding", "values", default=[])
634
+ response_embedContent = {
635
+ "object": "list",
636
+ "data": [
637
+ {
638
+ "object": "embedding",
639
+ "embedding":content,
640
+ "index": 0
641
+ }
642
+ ],
643
+ "model": model,
644
+ "usage": {
645
+ "prompt_tokens": 0,
646
+ "total_tokens": 0
647
+ }
648
+ }
649
+
650
+ yield response_embedContent
629
651
  else:
630
652
  response_bytes = await response.aread()
631
653
  response_json = await asyncio.to_thread(json.loads, response_bytes)
aient/core/utils.py CHANGED
@@ -518,7 +518,7 @@ async def generate_sse_response(timestamp, model, content=None, tools_id=None, f
518
518
  sample_data["choices"][0]["delta"] = {}
519
519
  sample_data["choices"][0]["finish_reason"] = stop
520
520
 
521
- json_data = json.dumps(sample_data, ensure_ascii=False)
521
+ json_data = await asyncio.to_thread(json.dumps, sample_data, ensure_ascii=False)
522
522
  # print("json_data", json.dumps(sample_data, indent=4, ensure_ascii=False))
523
523
 
524
524
  # 构建SSE响应
@@ -557,6 +557,9 @@ async def generate_no_stream_response(timestamp, model, content=None, tools_id=N
557
557
  if function_call_name:
558
558
  if not tools_id:
559
559
  tools_id = f"call_{random_str}"
560
+
561
+ arguments_json = await asyncio.to_thread(json.dumps, function_call_content, ensure_ascii=False)
562
+
560
563
  sample_data = {
561
564
  "id": f"chatcmpl-{random_str}",
562
565
  "object": "chat.completion",
@@ -574,7 +577,7 @@ async def generate_no_stream_response(timestamp, model, content=None, tools_id=N
574
577
  "type": "function",
575
578
  "function": {
576
579
  "name": function_call_name,
577
- "arguments": json.dumps(function_call_content, ensure_ascii=False)
580
+ "arguments": arguments_json
578
581
  }
579
582
  }
580
583
  ],
@@ -605,7 +608,7 @@ async def generate_no_stream_response(timestamp, model, content=None, tools_id=N
605
608
  if total_tokens:
606
609
  sample_data["usage"] = {"prompt_tokens": prompt_tokens, "completion_tokens": completion_tokens, "total_tokens": total_tokens}
607
610
 
608
- json_data = json.dumps(sample_data, ensure_ascii=False)
611
+ json_data = await asyncio.to_thread(json.dumps, sample_data, ensure_ascii=False)
609
612
  # print("json_data", json.dumps(sample_data, indent=4, ensure_ascii=False))
610
613
 
611
614
  return json_data
aient/models/chatgpt.py CHANGED
@@ -33,6 +33,18 @@ class ModelNotFoundError(Exception):
33
33
  """Custom exception for model not found (404) errors."""
34
34
  pass
35
35
 
36
+ class RateLimitError(Exception):
37
+ """Custom exception for rate limit (429) errors."""
38
+ pass
39
+
40
+ class ConfigurationError(Exception):
41
+ """Custom exception for configuration errors."""
42
+ pass
43
+
44
+ class RetryFailedError(Exception):
45
+ """Custom exception for retry failures."""
46
+ pass
47
+
36
48
  class TaskComplete(Exception):
37
49
  """Exception-like signal to indicate the task is complete."""
38
50
  def __init__(self, message):
@@ -727,7 +739,7 @@ class chatgpt(BaseLLM):
727
739
  need_done_prompt = False
728
740
 
729
741
  # 发送请求并处理响应
730
- for i in range(30):
742
+ for i in range(10):
731
743
  tmp_post_json = copy.deepcopy(json_post)
732
744
  if need_done_prompt:
733
745
  tmp_post_json["messages"].extend(need_done_prompt)
@@ -778,19 +790,24 @@ class chatgpt(BaseLLM):
778
790
  raise APITimeoutError("Response timeout")
779
791
  if "HTTP Error', 'status_code': 404" in processed_chunk:
780
792
  raise ModelNotFoundError(f"Model: {model or self.engine} not found!")
793
+ if "HTTP Error', 'status_code': 429" in processed_chunk:
794
+ raise RateLimitError(f"Rate limit exceeded for model: {model or self.engine}")
781
795
  yield processed_chunk
782
796
  index += 1
783
797
 
784
798
  # 成功处理,跳出重试循环
785
799
  break
786
- except (httpx.ConnectError, httpx.ReadTimeout):
787
- self.logger.error("连接或读取超时错误,请检查服务器状态或网络连接。")
800
+ except (httpx.ConnectError, httpx.ReadTimeout, httpx.PoolTimeout):
801
+ self.logger.error("Connection or read timeout.")
788
802
  return # Stop iteration
789
803
  except httpx.RemoteProtocolError:
790
804
  continue
791
805
  except APITimeoutError:
792
806
  self.logger.warning("API response timeout (524), retrying...")
793
807
  continue
808
+ except RateLimitError as e:
809
+ self.logger.warning(f"{e}, retrying...")
810
+ continue
794
811
  except ValidationError as e:
795
812
  self.logger.warning(f"Validation failed: {e}. Retrying with corrective prompt.")
796
813
  need_done_prompt = [
@@ -811,10 +828,10 @@ class chatgpt(BaseLLM):
811
828
  self.logger.error(traceback.format_exc())
812
829
  if "Invalid URL" in str(e):
813
830
  error_message = "您输入了无效的API URL,请使用正确的URL并使用`/start`命令重新设置API URL。具体错误如下:\n\n" + str(e)
814
- raise Exception(json.dumps({"type": "configuration_error", "message": error_message}, ensure_ascii=False))
831
+ raise ConfigurationError(error_message)
815
832
  # 最后一次重试失败,向上抛出异常
816
- if i == 11:
817
- raise Exception(json.dumps({"type": "retry_failed", "message": str(e)}, ensure_ascii=False))
833
+ if i == 9:
834
+ raise RetryFailedError(str(e))
818
835
 
819
836
  def ask_stream(
820
837
  self,
@@ -941,4 +958,4 @@ class chatgpt(BaseLLM):
941
958
  {"role": "system", "content": self.system_prompt},
942
959
  ]
943
960
  self.tokens_usage[convo_id] = 0
944
- self.current_tokens[convo_id] = 0
961
+ self.current_tokens[convo_id] = 0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: aient
3
- Version: 1.1.76
3
+ Version: 1.1.78
4
4
  Summary: Aient: The Awakening of Agent.
5
5
  Requires-Python: >=3.11
6
6
  Description-Content-Type: text/markdown
@@ -2,9 +2,9 @@ aient/__init__.py,sha256=SRfF7oDVlOOAi6nGKiJIUK6B_arqYLO9iSMp-2IZZps,21
2
2
  aient/core/__init__.py,sha256=NxjebTlku35S4Dzr16rdSqSTWUvvwEeACe8KvHJnjPg,34
3
3
  aient/core/log_config.py,sha256=kz2_yJv1p-o3lUQOwA3qh-LSc3wMHv13iCQclw44W9c,274
4
4
  aient/core/models.py,sha256=KMlCRLjtq1wQHZTJGqnbWhPS2cHq6eLdnk7peKDrzR8,7490
5
- aient/core/request.py,sha256=vfwi3ZGYp2hQzSJ6mPXJVgcV_uu5AJ_NAL84mLfF8WA,76674
6
- aient/core/response.py,sha256=vQFuc3amHiD1hv_OiINRJnh33n79PnbdzMSBSRlqR5E,34309
7
- aient/core/utils.py,sha256=D98d5Cy1h4ejKtuxS0EEDtL4YqpaZLB5tuXoVP0IBWQ,28462
5
+ aient/core/request.py,sha256=nSLKNI-T6lI4yv873eQUElz5_fkJVy1LbOy27NiJPNk,77512
6
+ aient/core/response.py,sha256=ye6Ie5HevXVcH3X5V5BoOC5yDJMBKTKopWQzsCNs008,34977
7
+ aient/core/utils.py,sha256=wCPw4upG4-ciBxGyEXeTEHzxTbm38rVHrr4PIu8hDW8,28579
8
8
  aient/core/test/test_base_api.py,sha256=pWnycRJbuPSXKKU9AQjWrMAX1wiLC_014Qc9hh5C2Pw,524
9
9
  aient/core/test/test_geminimask.py,sha256=HFX8jDbNg_FjjgPNxfYaR-0-roUrOO-ND-FVsuxSoiw,13254
10
10
  aient/core/test/test_image.py,sha256=_T4peNGdXKBHHxyQNx12u-NTyFE8TlYI6NvvagsG2LE,319
@@ -12,7 +12,7 @@ aient/core/test/test_payload.py,sha256=8jBiJY1uidm1jzL-EiK0s6UGmW9XkdsuuKFGrwFhF
12
12
  aient/models/__init__.py,sha256=ZTiZgbfBPTjIPSKURE7t6hlFBVLRS9lluGbmqc1WjxQ,43
13
13
  aient/models/audio.py,sha256=kRd-8-WXzv4vwvsTGwnstK-WR8--vr9CdfCZzu8y9LA,1934
14
14
  aient/models/base.py,sha256=-nnihYnx-vHZMqeVO9ljjt3k4FcD3n-iMk4tT-10nRQ,7232
15
- aient/models/chatgpt.py,sha256=q62B6cbtHqKrqsQjM24k_1wi_5-UiuxkXa7e2yG_Clg,44661
15
+ aient/models/chatgpt.py,sha256=MLoDnKERg_5GkLG3q0Bzh-rneHcTbuBhHNph03YOVnw,45108
16
16
  aient/plugins/__init__.py,sha256=p3KO6Aa3Lupos4i2SjzLQw1hzQTigOAfEHngsldrsyk,986
17
17
  aient/plugins/arXiv.py,sha256=yHjb6PS3GUWazpOYRMKMzghKJlxnZ5TX8z9F6UtUVow,1461
18
18
  aient/plugins/config.py,sha256=TGgZ5SnNKZ8MmdznrZ-TEq7s2ulhAAwTSKH89bci3dA,7079
@@ -30,8 +30,8 @@ aient/plugins/write_file.py,sha256=Jt8fOEwqhYiSWpCbwfAr1xoi_BmFnx3076GMhuL06uI,3
30
30
  aient/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
31
31
  aient/utils/prompt.py,sha256=UcSzKkFE4-h_1b6NofI6xgk3GoleqALRKY8VBaXLjmI,11311
32
32
  aient/utils/scripts.py,sha256=VqtK4RFEx7KxkmcqG3lFDS1DxoNlFFGErEjopVcc8IE,40974
33
- aient-1.1.76.dist-info/licenses/LICENSE,sha256=XNdbcWldt0yaNXXWB_Bakoqnxb3OVhUft4MgMA_71ds,1051
34
- aient-1.1.76.dist-info/METADATA,sha256=nOBPFlGsNRfFqblnwjC4Z36Dq8TkUMcsdTDrI9Gcm8E,4842
35
- aient-1.1.76.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
36
- aient-1.1.76.dist-info/top_level.txt,sha256=3oXzrP5sAVvyyqabpeq8A2_vfMtY554r4bVE-OHBrZk,6
37
- aient-1.1.76.dist-info/RECORD,,
33
+ aient-1.1.78.dist-info/licenses/LICENSE,sha256=XNdbcWldt0yaNXXWB_Bakoqnxb3OVhUft4MgMA_71ds,1051
34
+ aient-1.1.78.dist-info/METADATA,sha256=0U1QX6xBqPt0B26kwN340cm3TG3K8GWdjQs81eThnbA,4842
35
+ aient-1.1.78.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
36
+ aient-1.1.78.dist-info/top_level.txt,sha256=3oXzrP5sAVvyyqabpeq8A2_vfMtY554r4bVE-OHBrZk,6
37
+ aient-1.1.78.dist-info/RECORD,,
File without changes