aient 1.1.76.tar.gz → 1.1.78.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {aient-1.1.76 → aient-1.1.78}/PKG-INFO +1 -1
- {aient-1.1.76 → aient-1.1.78}/aient/core/request.py +34 -15
- {aient-1.1.76 → aient-1.1.78}/aient/core/response.py +22 -0
- {aient-1.1.76 → aient-1.1.78}/aient/core/utils.py +6 -3
- {aient-1.1.76 → aient-1.1.78}/aient/models/chatgpt.py +24 -7
- {aient-1.1.76 → aient-1.1.78}/aient.egg-info/PKG-INFO +1 -1
- {aient-1.1.76 → aient-1.1.78}/pyproject.toml +1 -1
- {aient-1.1.76 → aient-1.1.78}/LICENSE +0 -0
- {aient-1.1.76 → aient-1.1.78}/README.md +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/__init__.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/core/__init__.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/core/log_config.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/core/models.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/core/test/test_base_api.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/core/test/test_geminimask.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/core/test/test_image.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/core/test/test_payload.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/models/__init__.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/models/audio.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/models/base.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/plugins/__init__.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/plugins/arXiv.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/plugins/config.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/plugins/excute_command.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/plugins/get_time.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/plugins/image.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/plugins/list_directory.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/plugins/read_file.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/plugins/read_image.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/plugins/readonly.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/plugins/registry.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/plugins/run_python.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/plugins/websearch.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/plugins/write_file.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/utils/__init__.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/utils/prompt.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient/utils/scripts.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient.egg-info/SOURCES.txt +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient.egg-info/dependency_links.txt +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient.egg-info/requires.txt +0 -0
- {aient-1.1.76 → aient-1.1.78}/aient.egg-info/top_level.txt +0 -0
- {aient-1.1.76 → aient-1.1.78}/setup.cfg +0 -0
- {aient-1.1.76 → aient-1.1.78}/test/test_Web_crawler.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/test/test_ddg_search.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/test/test_google_search.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/test/test_ollama.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/test/test_plugin.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/test/test_search.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/test/test_url.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/test/test_whisper.py +0 -0
- {aient-1.1.76 → aient-1.1.78}/test/test_yjh.py +0 -0
aient/core/request.py (+34 -15)

```diff
@@ -3,9 +3,9 @@ import json
 import httpx
 import base64
 import asyncio
-import urllib.parse
 from io import IOBase
 from typing import Tuple
+from urllib.parse import urlparse

 from .models import RequestModel, Message
 from .utils import (
@@ -41,7 +41,7 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
     else:
         gemini_stream = "generateContent"
     url = provider['base_url']
-    parsed_url =
+    parsed_url = urlparse(url)
     if "/v1beta" in parsed_url.path:
         api_version = "v1beta"
     else:
@@ -1952,21 +1952,40 @@ async def get_embedding_payload(request, engine, provider, api_key=None):
     headers = {
         "Content-Type": "application/json",
     }
-    if api_key:
-        headers['Authorization'] = f"Bearer {api_key}"
-    url = provider['base_url']
-    url = BaseAPI(url).embeddings

-
-
-
-
+    url = provider['base_url']
+    parsed_url = urlparse(url)
+    if "embedding-00" in original_model and "127.0.0.1" not in url and \
+        (parsed_url.path.endswith("/v1beta") or \
+        parsed_url.path.endswith("/v1") or \
+        (parsed_url.netloc == 'generativelanguage.googleapis.com' and "openai/chat/completions" not in parsed_url.path)):
+        if api_key:
+            headers['x-goog-api-key'] = f"{api_key}"
+        parsed_url = urllib.parse.urlparse(url)
+        url = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path.split('/models')[0].rstrip('/')}/models/{original_model}:embedContent"
+        payload = {
+            "content": {
+                "parts": [
+                    {
+                        "text": request.input
+                    }
+                ]
+            }
+        }
+    else:
+        if api_key:
+            headers['Authorization'] = f"Bearer {api_key}"
+        url = BaseAPI(url).embeddings
+        payload = {
+            "input": request.input,
+            "model": original_model,
+        }

-
-
-
-
-
+    if request.encoding_format:
+        if url.startswith("https://api.jina.ai"):
+            payload["embedding_type"] = request.encoding_format
+        else:
+            payload["encoding_format"] = request.encoding_format

     return url, headers, payload

```
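The `get_embedding_payload` rework above routes Gemini embedding models (names containing `embedding-00`) to the native `:embedContent` endpoint with an `x-goog-api-key` header, while every other provider keeps the OpenAI-style Bearer-token `/embeddings` request. The sketch below is a standalone illustration of that routing, not the package's code: `build_embedding_request` is a hypothetical name, and the `BaseAPI(url).embeddings` helper from the diff is replaced here with a plain string join.

```python
# Illustrative sketch of the routing added in get_embedding_payload.
# Assumptions: build_embedding_request is a hypothetical name; the OpenAI-style
# branch joins "/embeddings" directly instead of using the package's BaseAPI helper.
from urllib.parse import urlparse


def build_embedding_request(base_url: str, model: str, text: str, api_key: str):
    headers = {"Content-Type": "application/json"}
    parsed = urlparse(base_url)

    # Mirrors the condition added in the diff: Gemini embedding models on a
    # /v1beta, /v1, or generativelanguage.googleapis.com base URL go native.
    gemini_native = (
        "embedding-00" in model
        and "127.0.0.1" not in base_url
        and (
            parsed.path.endswith("/v1beta")
            or parsed.path.endswith("/v1")
            or (
                parsed.netloc == "generativelanguage.googleapis.com"
                and "openai/chat/completions" not in parsed.path
            )
        )
    )

    if gemini_native:
        # Gemini takes the key as a header and the text under content.parts.
        headers["x-goog-api-key"] = api_key
        root = parsed.path.split("/models")[0].rstrip("/")
        url = f"{parsed.scheme}://{parsed.netloc}{root}/models/{model}:embedContent"
        payload = {"content": {"parts": [{"text": text}]}}
    else:
        # OpenAI-compatible providers take a Bearer token and input/model fields.
        headers["Authorization"] = f"Bearer {api_key}"
        url = f"{base_url.rstrip('/')}/embeddings"
        payload = {"input": text, "model": model}

    return url, headers, payload
```

With a `generativelanguage.googleapis.com` base URL and a Gemini embedding model this yields an `:embedContent` URL; any other combination falls through to the Bearer-token branch.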
aient/core/response.py (+22 -0)

```diff
@@ -626,6 +626,28 @@ async def fetch_response(client, url, headers, payload, engine, model, timeout=2
         response_json = await asyncio.to_thread(json.loads, response_bytes)
         content = safe_get(response_json, "output", "choices", 0, "message", "content", 0, default=None)
         yield content
+
+    elif "embedContent" in url:
+        response_bytes = await response.aread()
+        response_json = await asyncio.to_thread(json.loads, response_bytes)
+        content = safe_get(response_json, "embedding", "values", default=[])
+        response_embedContent = {
+            "object": "list",
+            "data": [
+                {
+                    "object": "embedding",
+                    "embedding": content,
+                    "index": 0
+                }
+            ],
+            "model": model,
+            "usage": {
+                "prompt_tokens": 0,
+                "total_tokens": 0
+            }
+        }
+
+        yield response_embedContent
     else:
         response_bytes = await response.aread()
         response_json = await asyncio.to_thread(json.loads, response_bytes)
```
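The new `embedContent` branch in `fetch_response` reshapes Gemini's embedding reply into the OpenAI embeddings list format before yielding it. A minimal sketch of that reshaping, with `to_openai_embedding` as a hypothetical helper name:

```python
# Illustrative sketch of the reshaping done by the new embedContent branch.
# to_openai_embedding is a hypothetical name, not part of the package.
def to_openai_embedding(gemini_response: dict, model: str) -> dict:
    values = gemini_response.get("embedding", {}).get("values", [])
    return {
        "object": "list",
        "data": [{"object": "embedding", "embedding": values, "index": 0}],
        "model": model,
        # Usage is hard-coded to zero, matching the added branch in the diff.
        "usage": {"prompt_tokens": 0, "total_tokens": 0},
    }
```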
aient/core/utils.py (+6 -3)

```diff
@@ -518,7 +518,7 @@ async def generate_sse_response(timestamp, model, content=None, tools_id=None, f
         sample_data["choices"][0]["delta"] = {}
         sample_data["choices"][0]["finish_reason"] = stop

-    json_data = json.dumps
+    json_data = await asyncio.to_thread(json.dumps, sample_data, ensure_ascii=False)
     # print("json_data", json.dumps(sample_data, indent=4, ensure_ascii=False))

     # 构建SSE响应
@@ -557,6 +557,9 @@ async def generate_no_stream_response(timestamp, model, content=None, tools_id=N
     if function_call_name:
         if not tools_id:
             tools_id = f"call_{random_str}"
+
+        arguments_json = await asyncio.to_thread(json.dumps, function_call_content, ensure_ascii=False)
+
         sample_data = {
             "id": f"chatcmpl-{random_str}",
             "object": "chat.completion",
@@ -574,7 +577,7 @@ async def generate_no_stream_response(timestamp, model, content=None, tools_id=N
                         "type": "function",
                         "function": {
                             "name": function_call_name,
-                            "arguments":
+                            "arguments": arguments_json
                         }
                     }
                 ],
@@ -605,7 +608,7 @@ async def generate_no_stream_response(timestamp, model, content=None, tools_id=N
     if total_tokens:
         sample_data["usage"] = {"prompt_tokens": prompt_tokens, "completion_tokens": completion_tokens, "total_tokens": total_tokens}

-    json_data = json.dumps
+    json_data = await asyncio.to_thread(json.dumps, sample_data, ensure_ascii=False)
     # print("json_data", json.dumps(sample_data, indent=4, ensure_ascii=False))

     return json_data
```
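Both utils.py functions above replace a synchronous `json.dumps(...)` call with `await asyncio.to_thread(json.dumps, ..., ensure_ascii=False)`, apparently so that serializing a large payload does not hold up the event loop. A minimal, standard-library-only sketch of the pattern (the `serialize` wrapper is hypothetical; `asyncio.to_thread` requires Python 3.9+):

```python
# Illustrative sketch of the asyncio.to_thread pattern adopted in utils.py.
# The serialize wrapper is hypothetical and not part of the package.
import asyncio
import json


async def serialize(sample_data: dict) -> str:
    # Runs json.dumps in the default thread pool and awaits the result.
    return await asyncio.to_thread(json.dumps, sample_data, ensure_ascii=False)


if __name__ == "__main__":
    print(asyncio.run(serialize({"finish_reason": "stop", "内容": "你好"})))
```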
aient/models/chatgpt.py (+24 -7)

```diff
@@ -33,6 +33,18 @@ class ModelNotFoundError(Exception):
     """Custom exception for model not found (404) errors."""
     pass

+class RateLimitError(Exception):
+    """Custom exception for rate limit (429) errors."""
+    pass
+
+class ConfigurationError(Exception):
+    """Custom exception for configuration errors."""
+    pass
+
+class RetryFailedError(Exception):
+    """Custom exception for retry failures."""
+    pass
+
 class TaskComplete(Exception):
     """Exception-like signal to indicate the task is complete."""
     def __init__(self, message):
@@ -727,7 +739,7 @@ class chatgpt(BaseLLM):
         need_done_prompt = False

         # 发送请求并处理响应
-        for i in range(
+        for i in range(10):
             tmp_post_json = copy.deepcopy(json_post)
             if need_done_prompt:
                 tmp_post_json["messages"].extend(need_done_prompt)
@@ -778,19 +790,24 @@ class chatgpt(BaseLLM):
                         raise APITimeoutError("Response timeout")
                     if "HTTP Error', 'status_code': 404" in processed_chunk:
                         raise ModelNotFoundError(f"Model: {model or self.engine} not found!")
+                    if "HTTP Error', 'status_code': 429" in processed_chunk:
+                        raise RateLimitError(f"Rate limit exceeded for model: {model or self.engine}")
                     yield processed_chunk
                     index += 1

                 # 成功处理,跳出重试循环
                 break
-            except (httpx.ConnectError, httpx.ReadTimeout):
-                self.logger.error("
+            except (httpx.ConnectError, httpx.ReadTimeout, httpx.PoolTimeout):
+                self.logger.error("Connection or read timeout.")
                 return  # Stop iteration
             except httpx.RemoteProtocolError:
                 continue
             except APITimeoutError:
                 self.logger.warning("API response timeout (524), retrying...")
                 continue
+            except RateLimitError as e:
+                self.logger.warning(f"{e}, retrying...")
+                continue
             except ValidationError as e:
                 self.logger.warning(f"Validation failed: {e}. Retrying with corrective prompt.")
                 need_done_prompt = [
@@ -811,10 +828,10 @@ class chatgpt(BaseLLM):
                 self.logger.error(traceback.format_exc())
                 if "Invalid URL" in str(e):
                     error_message = "您输入了无效的API URL,请使用正确的URL并使用`/start`命令重新设置API URL。具体错误如下:\n\n" + str(e)
-                    raise
+                    raise ConfigurationError(error_message)
                 # 最后一次重试失败,向上抛出异常
-                if i ==
-                    raise
+                if i == 10:
+                    raise RetryFailedError(str(e))

     def ask_stream(
         self,
@@ -941,4 +958,4 @@ class chatgpt(BaseLLM):
             {"role": "system", "content": self.system_prompt},
         ]
         self.tokens_usage[convo_id] = 0
-        self.current_tokens[convo_id] = 0
+        self.current_tokens[convo_id] = 0
```
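The chatgpt.py changes add `RateLimitError`, `ConfigurationError`, and `RetryFailedError` and wire them into a fixed ten-attempt request loop: 429 error chunks raise `RateLimitError` and are retried, invalid-URL failures are re-raised as `ConfigurationError`, and `RetryFailedError` wraps the last error once retries run out. The sketch below shows that retry shape in isolation; `ask_with_retries`, `call_api`, and the use of `ValueError` to stand in for a configuration failure are illustrative, not the package's API.

```python
# Illustrative sketch of the retry shape the chatgpt.py changes move toward.
# ask_with_retries and call_api are hypothetical; ValueError stands in for the
# "Invalid URL" condition that the real code turns into ConfigurationError.
class RateLimitError(Exception):
    """Raised for rate limit (429) errors; treated as retryable."""


class ConfigurationError(Exception):
    """Raised for non-retryable setup problems such as a bad API URL."""


class RetryFailedError(Exception):
    """Raised when every attempt has failed."""


def ask_with_retries(call_api, max_attempts: int = 10):
    last_error = None
    for _ in range(max_attempts):
        try:
            return call_api()
        except RateLimitError as e:
            last_error = e  # transient: try again
            continue
        except ValueError as e:
            raise ConfigurationError(str(e))
    raise RetryFailedError(str(last_error))
```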
All remaining files listed above are unchanged between aient 1.1.76 and 1.1.78.