beswarm 0.2.50__py3-none-any.whl → 0.2.52__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of beswarm might be problematic.
- beswarm/agents/chatgroup.py +2 -2
- beswarm/agents/planact.py +2 -2
- beswarm/aient/{src/aient → aient}/core/response.py +28 -38
- beswarm/aient/{src/aient → aient}/core/utils.py +8 -2
- beswarm/aient/aient/models/__init__.py +2 -0
- beswarm/aient/{src/aient → aient}/models/base.py +31 -15
- beswarm/aient/{src/aient → aient}/models/chatgpt.py +119 -186
- beswarm/aient/{src/aient → aient}/plugins/excute_command.py +1 -1
- beswarm/aient/{src/aient → aient}/plugins/read_file.py +1 -1
- beswarm/aient/{src/aient → aient}/plugins/websearch.py +1 -1
- beswarm/aient/{src/aient → aient}/plugins/write_file.py +1 -1
- beswarm/aient/{src/aient → aient}/utils/scripts.py +3 -1
- beswarm/knowledge_graph.py +47 -2
- beswarm/taskmanager.py +1 -1
- beswarm/tools/__init__.py +1 -1
- beswarm/tools/click.py +3 -3
- beswarm/tools/completion.py +1 -1
- beswarm/tools/edit_file.py +2 -2
- beswarm/tools/graph.py +1 -1
- beswarm/tools/planner.py +3 -3
- beswarm/tools/repomap.py +1 -1
- beswarm/tools/request_input.py +1 -1
- beswarm/tools/screenshot.py +1 -1
- beswarm/tools/search_arxiv.py +1 -1
- beswarm/tools/search_web.py +1 -1
- beswarm/tools/subtasks.py +1 -1
- beswarm/tools/worker.py +1 -1
- beswarm/tools/write_csv.py +1 -1
- beswarm/utils.py +1 -1
- {beswarm-0.2.50.dist-info → beswarm-0.2.52.dist-info}/METADATA +1 -1
- beswarm-0.2.52.dist-info/RECORD +122 -0
- beswarm/aient/main.py +0 -50
- beswarm/aient/setup.py +0 -15
- beswarm/aient/src/aient/models/__init__.py +0 -9
- beswarm/aient/src/aient/models/claude.py +0 -573
- beswarm/aient/src/aient/models/duckduckgo.py +0 -241
- beswarm/aient/src/aient/models/gemini.py +0 -357
- beswarm/aient/src/aient/models/groq.py +0 -234
- beswarm/aient/src/aient/models/vertex.py +0 -420
- beswarm/aient/test/chatgpt.py +0 -161
- beswarm/aient/test/claude.py +0 -32
- beswarm/aient/test/test.py +0 -2
- beswarm/aient/test/test_API.py +0 -6
- beswarm/aient/test/test_Deepbricks.py +0 -20
- beswarm/aient/test/test_aiwaves.py +0 -25
- beswarm/aient/test/test_aiwaves_arxiv.py +0 -19
- beswarm/aient/test/test_ask_gemini.py +0 -8
- beswarm/aient/test/test_class.py +0 -17
- beswarm/aient/test/test_claude.py +0 -23
- beswarm/aient/test/test_claude_zh_char.py +0 -26
- beswarm/aient/test/test_download_pdf.py +0 -56
- beswarm/aient/test/test_gemini.py +0 -97
- beswarm/aient/test/test_get_token_dict.py +0 -21
- beswarm/aient/test/test_jieba.py +0 -32
- beswarm/aient/test/test_json.py +0 -65
- beswarm/aient/test/test_logging.py +0 -32
- beswarm/aient/test/test_py_run.py +0 -26
- beswarm/aient/test/test_requests.py +0 -162
- beswarm/aient/test/test_tikitoken.py +0 -19
- beswarm/aient/test/test_token.py +0 -94
- beswarm/aient/test/test_wildcard.py +0 -20
- beswarm-0.2.50.dist-info/RECORD +0 -151
- /beswarm/aient/{src/aient → aient}/__init__.py +0 -0
- /beswarm/aient/{src/aient → aient}/core/__init__.py +0 -0
- /beswarm/aient/{src/aient → aient}/core/log_config.py +0 -0
- /beswarm/aient/{src/aient → aient}/core/models.py +0 -0
- /beswarm/aient/{src/aient → aient}/core/request.py +0 -0
- /beswarm/aient/{src/aient → aient}/core/test/test_base_api.py +0 -0
- /beswarm/aient/{src/aient → aient}/core/test/test_geminimask.py +0 -0
- /beswarm/aient/{src/aient → aient}/core/test/test_image.py +0 -0
- /beswarm/aient/{src/aient → aient}/core/test/test_payload.py +0 -0
- /beswarm/aient/{src/aient → aient}/models/audio.py +0 -0
- /beswarm/aient/{src/aient → aient}/plugins/__init__.py +0 -0
- /beswarm/aient/{src/aient → aient}/plugins/arXiv.py +0 -0
- /beswarm/aient/{src/aient → aient}/plugins/config.py +0 -0
- /beswarm/aient/{src/aient → aient}/plugins/get_time.py +0 -0
- /beswarm/aient/{src/aient → aient}/plugins/image.py +0 -0
- /beswarm/aient/{src/aient → aient}/plugins/list_directory.py +0 -0
- /beswarm/aient/{src/aient → aient}/plugins/read_image.py +0 -0
- /beswarm/aient/{src/aient → aient}/plugins/readonly.py +0 -0
- /beswarm/aient/{src/aient → aient}/plugins/registry.py +0 -0
- /beswarm/aient/{src/aient → aient}/plugins/run_python.py +0 -0
- /beswarm/aient/{src/aient → aient}/utils/__init__.py +0 -0
- /beswarm/aient/{src/aient → aient}/utils/prompt.py +0 -0
- {beswarm-0.2.50.dist-info → beswarm-0.2.52.dist-info}/WHEEL +0 -0
- {beswarm-0.2.50.dist-info → beswarm-0.2.52.dist-info}/top_level.txt +0 -0
beswarm/agents/chatgroup.py
CHANGED

@@ -15,8 +15,8 @@ from typing import List, Dict, Union
 from ..broker import MessageBroker
 from ..bemcp.bemcp import MCPManager
 from ..utils import register_mcp_tools
-from ..aient.src.aient.models import chatgpt
-from ..aient.src.aient.plugins import get_function_call_list, registry
+from ..aient.aient.models import chatgpt
+from ..aient.aient.plugins import get_function_call_list, registry
 
 worker_system_prompt = """
 你是{name}。帮助用户头脑风暴。请分析不同用户的观点,并给出你的观点。
beswarm/agents/planact.py
CHANGED

@@ -10,8 +10,8 @@ from datetime import datetime
 from typing import List, Dict, Union
 
 from ..broker import MessageBroker
-from ..aient.src.aient.models import chatgpt
-from ..aient.src.aient.plugins import get_function_call_list, registry
+from ..aient.aient.models import chatgpt
+from ..aient.aient.plugins import get_function_call_list, registry
 from ..prompt import worker_system_prompt, instruction_system_prompt
 from ..utils import extract_xml_content, get_current_screen_image_message, replace_xml_content, register_mcp_tools, setup_logger
 
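Both agent modules change only their imports: the vendored aient package moved from beswarm/aient/src/aient/ to beswarm/aient/aient/ (see the renamed files in the list above), so the src segment drops out of every import path. For external callers the same move looks like the sketch below; the absolute paths follow directly from the relative imports in the diff, and the only assumption is that the re-exported names are unchanged.

```python
# 0.2.52 layout: the vendored package now sits directly under beswarm.aient.
from beswarm.aient.aient.models import chatgpt
from beswarm.aient.aient.plugins import get_function_call_list, registry

# 0.2.50 layout (removed):
# from beswarm.aient.src.aient.models import chatgpt
```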
beswarm/aient/{src/aient → aient}/core/response.py
CHANGED

@@ -33,6 +33,8 @@ def gemini_json_poccess(response_str):
     promptTokenCount = safe_get(response_json, "usageMetadata", "promptTokenCount", default=0)
     candidatesTokenCount = safe_get(response_json, "usageMetadata", "candidatesTokenCount", default=0)
     totalTokenCount = safe_get(response_json, "usageMetadata", "totalTokenCount", default=0)
+    if finishReason != "STOP":
+        logger.error(f"finishReason: {finishReason}")
 
     content = reasoning_content = safe_get(json_data, "parts", 0, "text", default="")
     b64_json = safe_get(json_data, "parts", 0, "inlineData", "data", default="")
@@ -424,53 +426,41 @@ async def fetch_claude_response_stream(client, url, headers, payload, model):
             line, buffer = buffer.split("\n", 1)
             # logger.info(line)
 
-            if line.startswith("data:"):
-                line = line.lstrip("data: ")
+            if line.startswith("data:") and (line := line.lstrip("data: ")):
                 resp: dict = json.loads(line)
-
-
-
-
-
-
-                tokens_use = message.get("usage")
-                if tokens_use:
-                    input_tokens = tokens_use.get("input_tokens", 0)
-                usage = resp.get("usage")
-                if usage:
-                    output_tokens = usage.get("output_tokens", 0)
+
+                input_tokens = input_tokens or safe_get(resp, "message", "usage", "input_tokens", default=0)
+                # cache_creation_input_tokens = safe_get(resp, "message", "usage", "cache_creation_input_tokens", default=0)
+                # cache_read_input_tokens = safe_get(resp, "message", "usage", "cache_read_input_tokens", default=0)
+                output_tokens = safe_get(resp, "usage", "output_tokens", default=0)
+                if output_tokens:
                     total_tokens = input_tokens + output_tokens
                     sse_string = await generate_sse_response(timestamp, model, None, None, None, None, None, total_tokens, input_tokens, output_tokens)
                     yield sse_string
-
-
-
-
-
-
-                # print("tool_use", tool_use)
-                tools_id = tool_use["id"]
-                if "name" in tool_use:
-                    function_call_name = tool_use["name"]
-                    sse_string = await generate_sse_response(timestamp, model, None, tools_id, function_call_name, None)
-                    yield sse_string
-                delta = resp.get("delta")
-                # print("delta", delta)
-                if not delta:
+                    break
+
+                text = safe_get(resp, "delta", "text", default="")
+                if text:
+                    sse_string = await generate_sse_response(timestamp, model, text)
+                    yield sse_string
                     continue
-
-
-
+
+                function_call_name = safe_get(resp, "content_block", "name", default=None)
+                tools_id = safe_get(resp, "content_block", "id", default=None)
+                if tools_id and function_call_name:
+                    sse_string = await generate_sse_response(timestamp, model, None, tools_id, function_call_name, None)
                     yield sse_string
-
-
-
+
+                thinking_content = safe_get(resp, "delta", "thinking", default="")
+                if thinking_content:
+                    sse_string = await generate_sse_response(timestamp, model, reasoning_content=thinking_content)
                     yield sse_string
-
-
-
+
+                function_call_content = safe_get(resp, "delta", "partial_json", default="")
+                if function_call_content:
                     sse_string = await generate_sse_response(timestamp, model, None, None, None, function_call_content)
                     yield sse_string
+
     yield "data: [DONE]" + end_of_line
 
 async def fetch_aws_response_stream(client, url, headers, payload, model):
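The rewritten Claude stream handler drops chained .get() lookups and bare dict indexing in favor of safe_get from ..utils.scripts, which tolerates missing keys and list indices. Its implementation is not part of this diff; the sketch below is a stand-in that matches the call shape used above, safe_get(obj, *keys, default=...), where keys may be dict keys or list indices.

```python
def safe_get(data, *keys, default=None):
    """Walk nested dicts/lists, returning default on any missing step.

    Illustrative sketch only: mirrors the diff's call sites, e.g.
    safe_get(resp, "delta", "text", default="") or
    safe_get(json_data, "parts", 0, "inlineData", "data", default="").
    """
    current = data
    for key in keys:
        try:
            current = current[key]
        except (KeyError, IndexError, TypeError):
            return default
    return current if current is not None else default

resp = {"delta": {"text": "hello"}, "usage": None}
print(safe_get(resp, "delta", "text", default=""))          # "hello"
print(safe_get(resp, "usage", "output_tokens", default=0))  # 0
```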
beswarm/aient/{src/aient → aient}/core/utils.py
CHANGED

@@ -46,7 +46,10 @@ class BaseAPI:
         before_v1 = ""
         self.base_url: str = urlunparse(parsed_url[:2] + ("",) + ("",) * 3)
         self.v1_url: str = urlunparse(parsed_url[:2]+ (before_v1,) + ("",) * 3)
-        self.v1_models: str = urlunparse(parsed_url[:2] + (before_v1 + "models",) + ("",) * 3)
+        if "v1/messages" in parsed_url.path:
+            self.v1_models: str = urlunparse(parsed_url[:2] + ("v1/models",) + ("",) * 3)
+        else:
+            self.v1_models: str = urlunparse(parsed_url[:2] + (before_v1 + "models",) + ("",) * 3)
         self.chat_url: str = urlunparse(parsed_url[:2] + (before_v1 + "chat/completions",) + ("",) * 3)
         self.image_url: str = urlunparse(parsed_url[:2] + (before_v1 + "images/generations",) + ("",) * 3)
         if parsed_url.hostname == "dashscope.aliyuncs.com":
@@ -192,7 +195,10 @@ def update_initial_model(provider):
     endpoint_models_url = endpoint.v1_models
     if isinstance(api, list):
         api = api[0]
-    headers = {"Authorization": f"Bearer {api}"}
+    if "v1/messages" in api_url:
+        headers = {"x-api-key": api, "anthropic-version": "2023-06-01"}
+    else:
+        headers = {"Authorization": f"Bearer {api}"}
     response = httpx.get(
         endpoint_models_url,
         headers=headers,
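Both hunks follow the same pattern: when the configured endpoint is an Anthropic-style v1/messages URL, model-list requests switch from OpenAI-style Bearer auth to Anthropic's x-api-key plus anthropic-version headers, and the models URL is rewritten to v1/models. A minimal sketch of the selection logic; the helper name and example URLs are illustrative, not from the package:

```python
import httpx

def build_model_list_headers(api_url: str, api_key: str) -> dict:
    """Pick auth headers based on the endpoint style (illustrative helper)."""
    if "v1/messages" in api_url:
        # Anthropic-style endpoint: key goes in x-api-key, plus a pinned API version.
        return {"x-api-key": api_key, "anthropic-version": "2023-06-01"}
    # OpenAI-style endpoint: standard Bearer token.
    return {"Authorization": f"Bearer {api_key}"}

# Usage sketch:
# httpx.get("https://api.anthropic.com/v1/models",
#           headers=build_model_list_headers("https://api.anthropic.com/v1/messages", "sk-..."))
```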
beswarm/aient/{src/aient → aient}/models/base.py
CHANGED

@@ -53,20 +53,10 @@ class BaseLLM:
                 "https": proxy,
             },
         )
-
-
-
-
-            self.aclient = httpx.AsyncClient(
-                follow_redirects=True,
-                proxies=proxy,
-                timeout=timeout,
-            )
-        else:
-            self.aclient = httpx.AsyncClient(
-                follow_redirects=True,
-                timeout=timeout,
-            )
+        self._aclient = None
+        self._proxy = proxy
+        self._timeout = timeout
+        self._loop = None
 
         self.conversation: dict[str, list[dict]] = {
             "default": [
@@ -83,6 +73,33 @@ class BaseLLM:
         self.use_plugins = use_plugins
         self.print_log: bool = print_log
 
+    def _get_aclient(self):
+        """
+        Lazily initialize and return the httpx.AsyncClient.
+        This method ensures the client is always bound to a running event loop.
+        """
+        import asyncio
+        try:
+            loop = asyncio.get_running_loop()
+        except RuntimeError:
+            loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(loop)
+
+        if self._aclient is None or self._aclient.is_closed or self._loop is not loop:
+            self._loop = loop
+            proxy = self._proxy or os.environ.get("all_proxy") or os.environ.get("ALL_PROXY") or None
+            proxies = proxy if proxy and "socks5h" not in proxy else None
+            self._aclient = httpx.AsyncClient(
+                follow_redirects=True,
+                proxy=proxies,
+                timeout=self._timeout,
+            )
+        return self._aclient
+
+    @property
+    def aclient(self):
+        return self._get_aclient()
+
     def add_to_conversation(
         self,
         message: list,
@@ -196,7 +213,6 @@ class BaseLLM:
             **kwargs,
         ):
             response += chunk
-        # full_response: str = "".join([r async for r in response])
         full_response: str = "".join(response)
         return full_response
 
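The refactor replaces a client created eagerly in __init__ (and therefore bound to whatever event loop existed at construction time) with a property that rebuilds the httpx.AsyncClient whenever the running loop changes or the client has been closed. A minimal standalone sketch of the same pattern; the class and names here are illustrative, only the technique matches the diff:

```python
import asyncio
import httpx

class LazyClientHolder:
    """Sketch of the diff's technique: rebuild the AsyncClient when the loop changes."""

    def __init__(self, timeout: float = 20.0):
        self._aclient = None
        self._timeout = timeout
        self._loop = None

    @property
    def aclient(self) -> httpx.AsyncClient:
        try:
            loop = asyncio.get_running_loop()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
        if self._aclient is None or self._aclient.is_closed or self._loop is not loop:
            # A client bound to a closed or foreign loop cannot be awaited safely; make a new one.
            self._loop = loop
            self._aclient = httpx.AsyncClient(follow_redirects=True, timeout=self._timeout)
        return self._aclient

holder = LazyClientHolder()

async def fetch(url: str) -> int:
    resp = await holder.aclient.get(url)
    return resp.status_code

# Each asyncio.run() creates and closes its own loop. With an eagerly created
# client the second call would typically fail ("Event loop is closed"); the
# lazy property simply rebuilds the client on the new loop.
print(asyncio.run(fetch("https://example.com")))
print(asyncio.run(fetch("https://example.com")))
```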
beswarm/aient/{src/aient → aient}/models/chatgpt.py
CHANGED

@@ -17,7 +17,7 @@ from ..plugins.registry import registry
 from ..plugins import PLUGINS, get_tools_result_async, function_call_list, update_tools_config
 from ..utils.scripts import safe_get, async_generator_to_sync, parse_function_xml, parse_continuous_json, convert_functions_to_xml, remove_xml_tags_and_content
 from ..core.request import prepare_request_payload
-from ..core.response import fetch_response_stream
+from ..core.response import fetch_response_stream, fetch_response
 
 def get_filtered_keys_from_object(obj: object, *keys: str) -> Set[str]:
     """
@@ -288,6 +288,7 @@ class chatgpt(BaseLLM):
         convo_id: str = "default",
         model: str = "",
         pass_history: int = 9999,
+        stream: bool = True,
         **kwargs,
     ):
         self.conversation[convo_id][0] = {"role": "system","content": self.system_prompt + "\n\n" + self.get_latest_file_content()}
@@ -309,12 +310,13 @@ class chatgpt(BaseLLM):
                 {"role": "system","content": self.system_prompt + "\n\n" + self.get_latest_file_content()},
                 {"role": role, "content": prompt}
             ],
-            "stream": True,
-            "stream_options": {
-                "include_usage": True
-            },
+            "stream": stream,
             "temperature": kwargs.get("temperature", self.temperature)
         }
+        if stream:
+            request_data["stream_options"] = {
+                "include_usage": True
+            }
 
         if kwargs.get("max_tokens", self.max_tokens):
             request_data["max_tokens"] = kwargs.get("max_tokens", self.max_tokens)
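Threading stream through get_post_body means the payload now reflects the caller's choice, and stream_options (which OpenAI-compatible endpoints typically reject unless stream is true) is only attached for streaming requests. A sketch of the two payload shapes; the helper and field values are illustrative, not the package's function:

```python
import json

def build_request_data(prompt: str, stream: bool = True) -> dict:
    """Illustrative payload shape after this change."""
    request_data = {
        "messages": [{"role": "user", "content": prompt}],
        "stream": stream,
        "temperature": 0.5,
    }
    if stream:
        # Only meaningful (and typically only accepted) together with "stream": true.
        request_data["stream_options"] = {"include_usage": True}
    return request_data

print(json.dumps(build_request_data("hi", stream=True), indent=2))
print(json.dumps(build_request_data("hi", stream=False), indent=2))
```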
@@ -674,137 +676,7 @@ class chatgpt(BaseLLM):
         self.conversation[convo_id].pop(-1)
         self.conversation[convo_id].pop(-1)
 
-    def ask_stream(
-        self,
-        prompt: list,
-        role: str = "user",
-        convo_id: str = "default",
-        model: str = "",
-        pass_history: int = 9999,
-        function_name: str = "",
-        total_tokens: int = 0,
-        function_arguments: str = "",
-        function_call_id: str = "",
-        language: str = "English",
-        system_prompt: str = None,
-        **kwargs,
-    ):
-        """
-        Ask a question (同步流式响应)
-        """
-        # 准备会话
-        self.system_prompt = system_prompt or self.system_prompt
-        if convo_id not in self.conversation or pass_history <= 2:
-            self.reset(convo_id=convo_id, system_prompt=system_prompt)
-        self.add_to_conversation(prompt, role, convo_id=convo_id, function_name=function_name, total_tokens=total_tokens, function_arguments=function_arguments, function_call_id=function_call_id, pass_history=pass_history)
-
-        # 获取请求体
-        json_post = None
-        async def get_post_body_async():
-            nonlocal json_post
-            url, headers, json_post, engine_type = await self.get_post_body(prompt, role, convo_id, model, pass_history, **kwargs)
-            return url, headers, json_post, engine_type
-
-        # 替换原来的获取请求体的代码
-        # json_post = next(async_generator_to_sync(get_post_body_async()))
-        try:
-            url, headers, json_post, engine_type = asyncio.run(get_post_body_async())
-        except RuntimeError:
-            # 如果已经在事件循环中,则使用不同的方法
-            loop = asyncio.get_event_loop()
-            url, headers, json_post, engine_type = loop.run_until_complete(get_post_body_async())
-
-        self.truncate_conversation(convo_id=convo_id)
-
-        # 打印日志
-        if self.print_log:
-            self.logger.info(f"api_url: {kwargs.get('api_url', self.api_url.chat_url)}, {url}")
-            self.logger.info(f"api_key: {kwargs.get('api_key', self.api_key)}")
-
-        # 发送请求并处理响应
-        for _ in range(3):
-            if self.print_log:
-                replaced_text = json.loads(re.sub(r';base64,([A-Za-z0-9+/=]+)', ';base64,***', json.dumps(json_post)))
-                replaced_text_str = json.dumps(replaced_text, indent=4, ensure_ascii=False)
-                self.logger.info(f"Request Body:\n{replaced_text_str}")
-
-            try:
-                # 改进处理方式,创建一个内部异步函数来处理异步调用
-                async def process_async():
-                    # 异步调用 fetch_response_stream
-                    # self.logger.info("--------------------------------")
-                    # self.logger.info(prompt)
-                    # self.logger.info(parse_function_xml(prompt))
-                    # self.logger.info(convert_functions_to_xml(parse_function_xml(prompt)))
-                    # self.logger.info(convert_functions_to_xml(parse_function_xml(prompt)).strip() == prompt)
-                    # self.logger.info("--------------------------------")
-                    if prompt and "</" in prompt and "<instructions>" not in prompt and convert_functions_to_xml(parse_function_xml(prompt)).strip() == prompt:
-                        tmp_response = {
-                            "id": "chatcmpl-zXCi5TxWy953TCcxFocSienhvx0BB",
-                            "object": "chat.completion.chunk",
-                            "created": 1754588695,
-                            "model": "gemini-2.5-flash",
-                            "choices": [
-                                {
-                                    "index": 0,
-                                    "delta": {
-                                        "role": "assistant",
-                                        "content": prompt
-                                    },
-                                    "finish_reason": "stop"
-                                }
-                            ],
-                            "system_fingerprint": "fp_d576307f90"
-                        }
-                        async def _mock_response_generator():
-                            yield f"data: {json.dumps(tmp_response)}\n\n"
-                        async_generator = _mock_response_generator()
-                    else:
-                        async_generator = fetch_response_stream(
-                            self.aclient,
-                            url,
-                            headers,
-                            json_post,
-                            engine_type,
-                            model or self.engine,
-                        )
-                    # 异步处理响应流
-                    async for chunk in self._process_stream_response(
-                        async_generator,
-                        convo_id=convo_id,
-                        function_name=function_name,
-                        total_tokens=total_tokens,
-                        function_arguments=function_arguments,
-                        function_call_id=function_call_id,
-                        model=model,
-                        language=language,
-                        system_prompt=system_prompt,
-                        pass_history=pass_history,
-                        is_async=True,
-                        **kwargs
-                    ):
-                        yield chunk
-
-                # 将异步函数转换为同步生成器
-                return async_generator_to_sync(process_async())
-            except ConnectionError:
-                self.logger.error("连接错误,请检查服务器状态或网络连接。")
-                return
-            except requests.exceptions.ReadTimeout:
-                self.logger.error("请求超时,请检查网络连接或增加超时时间。")
-                return
-            except httpx.RemoteProtocolError:
-                continue
-            except Exception as e:
-                self.logger.error(f"发生了未预料的错误:{e}")
-                if "Invalid URL" in str(e):
-                    e = "您输入了无效的API URL,请使用正确的URL并使用`/start`命令重新设置API URL。具体错误如下:\n\n" + str(e)
-                    raise Exception(f"{e}")
-                # 最后一次重试失败,向上抛出异常
-                if _ == 2:
-                    raise Exception(f"{e}")
-
-    async def ask_stream_async(
+    async def _ask_stream_handler(
         self,
         prompt: list,
         role: str = "user",
@@ -817,10 +689,11 @@ class chatgpt(BaseLLM):
         function_call_id: str = "",
         language: str = "English",
         system_prompt: str = None,
+        stream: bool = True,
         **kwargs,
     ):
         """
-        Ask a question (异步流式响应)
+        Unified stream handler (async)
         """
         # 准备会话
         self.system_prompt = system_prompt or self.system_prompt
@@ -829,89 +702,64 @@ class chatgpt(BaseLLM):
         self.add_to_conversation(prompt, role, convo_id=convo_id, function_name=function_name, total_tokens=total_tokens, function_arguments=function_arguments, pass_history=pass_history, function_call_id=function_call_id)
 
         # 获取请求体
-        url, headers, json_post, engine_type = await self.get_post_body(prompt, role, convo_id, model, pass_history, **kwargs)
+        url, headers, json_post, engine_type = await self.get_post_body(prompt, role, convo_id, model, pass_history, stream=stream, **kwargs)
         self.truncate_conversation(convo_id=convo_id)
 
         # 打印日志
         if self.print_log:
-            self.logger.info(f"api_url: {url}")
+            self.logger.info(f"api_url: {kwargs.get('api_url', self.api_url.chat_url)}, {url}")
             self.logger.info(f"api_key: {kwargs.get('api_key', self.api_key)}")
 
         # 发送请求并处理响应
-        for _ in range(3):
+        for i in range(3):
             if self.print_log:
                 replaced_text = json.loads(re.sub(r';base64,([A-Za-z0-9+/=]+)', ';base64,***', json.dumps(json_post)))
                 replaced_text_str = json.dumps(replaced_text, indent=4, ensure_ascii=False)
                 self.logger.info(f"Request Body:\n{replaced_text_str}")
 
             try:
-                # 使用fetch_response_stream处理响应
-                # self.logger.info("--------------------------------")
-                # self.logger.info(prompt)
-                # self.logger.info(parse_function_xml(prompt))
-                # self.logger.info(convert_functions_to_xml(parse_function_xml(prompt)))
-                # self.logger.info(convert_functions_to_xml(parse_function_xml(prompt)).strip() == prompt)
-                # self.logger.info("--------------------------------")
                 if prompt and "</" in prompt and "<instructions>" not in prompt and convert_functions_to_xml(parse_function_xml(prompt)).strip() == prompt:
                     tmp_response = {
                         "id": "chatcmpl-zXCi5TxWy953TCcxFocSienhvx0BB",
                         "object": "chat.completion.chunk",
                         "created": 1754588695,
-                        "model": "gemini-2.5-flash",
+                        "model": model or self.engine,
                         "choices": [
                             {
-                                "index": 0,
-                                "delta": {
-                                    "role": "assistant",
-                                    "content": prompt
-                                },
-                                "finish_reason": "stop"
+                                "index": 0,
+                                "delta": {"role": "assistant", "content": prompt},
+                                "finish_reason": "stop",
                             }
                         ],
-                        "system_fingerprint": "fp_d576307f90"
+                        "system_fingerprint": "fp_d576307f90",
                     }
                     async def _mock_response_generator():
                         yield f"data: {json.dumps(tmp_response)}\n\n"
                     generator = _mock_response_generator()
                 else:
-                    generator = fetch_response_stream(
-                        self.aclient,
-                        url,
-                        headers,
-                        json_post,
-                        engine_type,
-                        model or self.engine,
-                    )
-                    # if isinstance(chunk, dict) and "error" in chunk:
-                    #     # 处理错误响应
-                    #     if chunk["status_code"] in (400, 422, 503):
-                    #         json_post, should_retry = await self._handle_response_error(
-                    #             type('Response', (), {'status_code': chunk["status_code"], 'text': json.dumps(chunk["details"]), 'aread': lambda: asyncio.sleep(0)}),
-                    #             json_post
-                    #         )
-                    #         if should_retry:
-                    #             break # 跳出内部循环,继续外部循环重试
-                    #     raise Exception(f"{chunk['status_code']} {chunk['error']} {chunk['details']}")
+                    if stream:
+                        generator = fetch_response_stream(
+                            self.aclient, url, headers, json_post, engine_type, model or self.engine,
+                        )
+                    else:
+                        generator = fetch_response(
+                            self.aclient, url, headers, json_post, engine_type, model or self.engine,
+                        )
 
                 # 处理正常响应
                 async for processed_chunk in self._process_stream_response(
-                    generator,
-                    convo_id=convo_id,
-                    function_name=function_name,
-                    total_tokens=total_tokens,
-                    function_arguments=function_arguments,
-                    function_call_id=function_call_id,
-                    model=model,
-                    language=language,
-                    system_prompt=system_prompt,
-                    pass_history=pass_history,
-                    is_async=True,
-                    **kwargs
+                    generator, convo_id=convo_id, function_name=function_name,
+                    total_tokens=total_tokens, function_arguments=function_arguments,
+                    function_call_id=function_call_id, model=model, language=language,
+                    system_prompt=system_prompt, pass_history=pass_history, is_async=True, **kwargs
                 ):
                     yield processed_chunk
 
                 # 成功处理,跳出重试循环
                 break
+            except (httpx.ConnectError, httpx.ReadTimeout):
+                self.logger.error("连接或读取超时错误,请检查服务器状态或网络连接。")
+                return # Stop iteration
             except httpx.RemoteProtocolError:
                 continue
             except Exception as e:
@@ -922,9 +770,69 @@ class chatgpt(BaseLLM):
                     e = "您输入了无效的API URL,请使用正确的URL并使用`/start`命令重新设置API URL。具体错误如下:\n\n" + str(e)
                     raise Exception(f"{e}")
                 # 最后一次重试失败,向上抛出异常
-                if _ == 2:
+                if i == 2:
                     raise Exception(f"{e}")
 
+    def ask_stream(
+        self,
+        prompt: list,
+        role: str = "user",
+        convo_id: str = "default",
+        model: str = "",
+        pass_history: int = 9999,
+        function_name: str = "",
+        total_tokens: int = 0,
+        function_arguments: str = "",
+        function_call_id: str = "",
+        language: str = "English",
+        system_prompt: str = None,
+        stream: bool = True,
+        **kwargs,
+    ):
+        """
+        Ask a question (同步流式响应)
+        """
+        try:
+            loop = asyncio.get_event_loop()
+            if loop.is_closed():
+                loop = asyncio.new_event_loop()
+                asyncio.set_event_loop(loop)
+        except RuntimeError:
+            loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(loop)
+
+        async_gen = self._ask_stream_handler(
+            prompt, role, convo_id, model, pass_history, function_name, total_tokens,
+            function_arguments, function_call_id, language, system_prompt, stream, **kwargs
+        )
+        for chunk in async_generator_to_sync(async_gen):
+            yield chunk
+
+    async def ask_stream_async(
+        self,
+        prompt: list,
+        role: str = "user",
+        convo_id: str = "default",
+        model: str = "",
+        pass_history: int = 9999,
+        function_name: str = "",
+        total_tokens: int = 0,
+        function_arguments: str = "",
+        function_call_id: str = "",
+        language: str = "English",
+        system_prompt: str = None,
+        stream: bool = True,
+        **kwargs,
+    ):
+        """
+        Ask a question (异步流式响应)
+        """
+        async for chunk in self._ask_stream_handler(
+            prompt, role, convo_id, model, pass_history, function_name, total_tokens,
+            function_arguments, function_call_id, language, system_prompt, stream, **kwargs
+        ):
+            yield chunk
+
     async def ask_async(
         self,
         prompt: str,
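The duplicated synchronous pipeline is gone: ask_stream is now a thin wrapper that pumps _ask_stream_handler through async_generator_to_sync (imported from ..utils.scripts, implementation not shown in this diff). A minimal sketch of that bridging technique, under the assumption that the helper simply drives the async generator to completion on an event loop:

```python
import asyncio

def async_generator_to_sync(agen):
    """Illustrative bridge: yield items of an async generator from sync code."""
    loop = asyncio.new_event_loop()
    try:
        while True:
            try:
                # Advance the async generator by one item.
                yield loop.run_until_complete(agen.__anext__())
            except StopAsyncIteration:
                break
    finally:
        loop.run_until_complete(agen.aclose())
        loop.close()

async def numbers():
    for n in range(3):
        await asyncio.sleep(0)
        yield n

print(list(async_generator_to_sync(numbers())))  # [0, 1, 2]
```

Note that the actual ask_stream also pre-creates (or revives) an event loop before iterating, which cooperates with the loop-aware aclient property added in base.py.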
@@ -943,11 +851,36 @@ class chatgpt(BaseLLM):
             convo_id=convo_id,
             pass_history=pass_history,
             model=model or self.engine,
+            stream=False,
             **kwargs,
         )
         full_response: str = "".join([r async for r in response])
         return full_response
 
+    def ask(
+        self,
+        prompt: str,
+        role: str = "user",
+        convo_id: str = "default",
+        model: str = "",
+        pass_history: int = 9999,
+        **kwargs,
+    ) -> str:
+        """
+        Non-streaming ask
+        """
+        response = self.ask_stream(
+            prompt=prompt,
+            role=role,
+            convo_id=convo_id,
+            pass_history=pass_history,
+            model=model or self.engine,
+            stream=False,
+            **kwargs,
+        )
+        full_response: str = "".join([r for r in response])
+        return full_response
+
     def rollback(self, n: int = 1, convo_id: str = "default") -> None:
         """
         Rollback the conversation
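Together with ask_async, which now passes stream=False so the joined result comes from a single non-streaming response, the class exposes symmetric entry points: ask_stream / ask_stream_async for incremental output and ask / ask_async for one-shot answers. A usage sketch; the constructor arguments are assumptions based on attributes visible elsewhere in the class (api_key, engine), not a signature confirmed by this diff:

```python
from beswarm.aient.aient.models import chatgpt

bot = chatgpt(api_key="sk-...", engine="gpt-4o-mini")  # hypothetical constructor args

# Synchronous one-shot: returns the full answer as a single string.
print(bot.ask("Explain the walrus operator in one sentence."))

# Synchronous streaming: chunks arrive as they are generated.
for chunk in bot.ask_stream("Count to three."):
    print(chunk, end="", flush=True)

# Async equivalents:
#   answer = await bot.ask_async("...")
#   async for chunk in bot.ask_stream_async("..."):
#       ...
```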
beswarm/aient/{src/aient → aient}/plugins/excute_command.py
CHANGED

@@ -276,4 +276,4 @@ for i in content.split("\\n"):
 # print(excute_command(python_long_task_command))
 
 # print(get_python_executable("python -c 'print(123)'"))
-# python -m beswarm.aient.src.aient.plugins.excute_command
+# python -m beswarm.aient.aient.plugins.excute_command
beswarm/aient/{src/aient → aient}/plugins/read_file.py
CHANGED

@@ -181,7 +181,7 @@ Examples:
         return f"<tool_error>读取文件时发生错误: {e}</tool_error>"
 
 if __name__ == "__main__":
-    # python -m beswarm.aient.src.aient.plugins.read_file
+    # python -m beswarm.aient.aient.plugins.read_file
     result = read_file("./work/cax/Lenia Notebook.ipynb")
     print(result)
     print(len(result))
beswarm/aient/{src/aient → aient}/plugins/websearch.py
CHANGED

@@ -344,7 +344,7 @@ async def get_search_results(query):
 
 if __name__ == "__main__":
     os.system("clear")
-    # python -m beswarm.aient.src.aient.plugins.websearch
+    # python -m beswarm.aient.aient.plugins.websearch
     print(get_url_content(""))
     # from aient.models import chatgpt
     # print(get_search_results("今天的微博热搜有哪些?", chatgpt.chatgpt_api_url.v1_url))