beswarm 0.2.51__tar.gz → 0.2.52__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of beswarm might be problematic.
- {beswarm-0.2.51 → beswarm-0.2.52}/PKG-INFO +1 -1
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/core/response.py +26 -38
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/core/utils.py +8 -2
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/models/base.py +31 -15
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/models/chatgpt.py +119 -186
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/utils/scripts.py +2 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm.egg-info/PKG-INFO +1 -1
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm.egg-info/SOURCES.txt +0 -1
- {beswarm-0.2.51 → beswarm-0.2.52}/pyproject.toml +1 -1
- beswarm-0.2.51/beswarm/aient/main.py +0 -15
- {beswarm-0.2.51 → beswarm-0.2.52}/MANIFEST.in +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/README.md +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/__init__.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/agents/chatgroup.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/agents/planact.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/__init__.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/core/__init__.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/core/log_config.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/core/models.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/core/request.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/core/test/test_base_api.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/core/test/test_geminimask.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/core/test/test_image.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/core/test/test_payload.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/models/__init__.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/models/audio.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/plugins/__init__.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/plugins/arXiv.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/plugins/config.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/plugins/excute_command.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/plugins/get_time.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/plugins/image.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/plugins/list_directory.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/plugins/read_file.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/plugins/read_image.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/plugins/readonly.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/plugins/registry.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/plugins/run_python.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/plugins/websearch.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/plugins/write_file.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/utils/__init__.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/utils/prompt.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/test/test_Web_crawler.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/test/test_ddg_search.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/test/test_google_search.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/test/test_ollama.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/test/test_plugin.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/test/test_search.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/test/test_url.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/test/test_whisper.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/test/test_yjh.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/bemcp/bemcp/__init__.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/bemcp/bemcp/decorator.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/bemcp/bemcp/main.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/bemcp/bemcp/utils.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/bemcp/test/client.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/bemcp/test/server.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/broker.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/core.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/knowledge_graph.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/prompt.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/README.md +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/arduino-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/c-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/chatito-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/commonlisp-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/cpp-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/csharp-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/d-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/dart-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/elisp-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/elixir-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/elm-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/gleam-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/go-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/java-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/javascript-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/lua-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/pony-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/properties-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/python-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/r-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/racket-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/ruby-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/rust-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/solidity-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/swift-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-language-pack/udev-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-languages/README.md +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-languages/c-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-languages/c_sharp-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-languages/cpp-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-languages/dart-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-languages/elisp-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-languages/elixir-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-languages/elm-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-languages/go-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-languages/hcl-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-languages/java-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-languages/javascript-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-languages/kotlin-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-languages/ocaml-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-languages/php-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-languages/python-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-languages/ql-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-languages/ruby-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-languages/rust-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-languages/scala-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/queries/tree-sitter-languages/typescript-tags.scm +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/taskmanager.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/tools/__init__.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/tools/click.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/tools/completion.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/tools/edit_file.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/tools/graph.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/tools/planner.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/tools/repomap.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/tools/request_input.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/tools/screenshot.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/tools/search_arxiv.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/tools/search_web.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/tools/subtasks.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/tools/worker.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/tools/write_csv.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm/utils.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm.egg-info/dependency_links.txt +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm.egg-info/requires.txt +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/beswarm.egg-info/top_level.txt +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/setup.cfg +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/test/test_TaskManager.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/test/test_broker.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/test/test_graph.py +0 -0
- {beswarm-0.2.51 → beswarm-0.2.52}/test/test_new_TaskManager.py +0 -0
{beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/core/response.py

@@ -426,53 +426,41 @@ async def fetch_claude_response_stream(client, url, headers, payload, model):
 line, buffer = buffer.split("\n", 1)
 # logger.info(line)
 
-if line.startswith("data:"):
-line = line.lstrip("data: ")
+if line.startswith("data:") and (line := line.lstrip("data: ")):
 resp: dict = json.loads(line)
-
-
-
-
-
-
-tokens_use = message.get("usage")
-if tokens_use:
-input_tokens = tokens_use.get("input_tokens", 0)
-usage = resp.get("usage")
-if usage:
-output_tokens = usage.get("output_tokens", 0)
+
+input_tokens = input_tokens or safe_get(resp, "message", "usage", "input_tokens", default=0)
+# cache_creation_input_tokens = safe_get(resp, "message", "usage", "cache_creation_input_tokens", default=0)
+# cache_read_input_tokens = safe_get(resp, "message", "usage", "cache_read_input_tokens", default=0)
+output_tokens = safe_get(resp, "usage", "output_tokens", default=0)
+if output_tokens:
 total_tokens = input_tokens + output_tokens
 sse_string = await generate_sse_response(timestamp, model, None, None, None, None, None, total_tokens, input_tokens, output_tokens)
 yield sse_string
-
-
-
-
-
-
-# print("tool_use", tool_use)
-tools_id = tool_use["id"]
-if "name" in tool_use:
-function_call_name = tool_use["name"]
-sse_string = await generate_sse_response(timestamp, model, None, tools_id, function_call_name, None)
-yield sse_string
-delta = resp.get("delta")
-# print("delta", delta)
-if not delta:
+break
+
+text = safe_get(resp, "delta", "text", default="")
+if text:
+sse_string = await generate_sse_response(timestamp, model, text)
+yield sse_string
 continue
-
-
-
+
+function_call_name = safe_get(resp, "content_block", "name", default=None)
+tools_id = safe_get(resp, "content_block", "id", default=None)
+if tools_id and function_call_name:
+sse_string = await generate_sse_response(timestamp, model, None, tools_id, function_call_name, None)
 yield sse_string
-
-
-
+
+thinking_content = safe_get(resp, "delta", "thinking", default="")
+if thinking_content:
+sse_string = await generate_sse_response(timestamp, model, reasoning_content=thinking_content)
 yield sse_string
-
-
-
+
+function_call_content = safe_get(resp, "delta", "partial_json", default="")
+if function_call_content:
 sse_string = await generate_sse_response(timestamp, model, None, None, None, function_call_content)
 yield sse_string
+
 yield "data: [DONE]" + end_of_line
 
 async def fetch_aws_response_stream(client, url, headers, payload, model):
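
Note on the hunk above: the handler now reads token counts and deltas through aient's safe_get helper and folds the "data:" prefix strip into the condition with the walrus operator. The sketch below is a simplified, self-contained illustration of that lookup style; the safe_get defined here is a stand-in for the real helper in aient.utils.scripts, and the sample event line is invented for the example.

    # A simplified stand-in for the safe_get helper that response.py imports from
    # aient's utils; the sample SSE line below is invented for illustration.
    import json

    def safe_get(data, *keys, default=None):
        # Walk nested dicts and return `default` as soon as a key is missing.
        for key in keys:
            if isinstance(data, dict) and key in data:
                data = data[key]
            else:
                return default
        return data

    line = 'data: {"type": "content_block_delta", "delta": {"text": "Hello"}}'
    if line.startswith("data:") and (line := line.lstrip("data: ")):
        resp = json.loads(line)
        input_tokens = safe_get(resp, "message", "usage", "input_tokens", default=0)
        output_tokens = safe_get(resp, "usage", "output_tokens", default=0)
        text = safe_get(resp, "delta", "text", default="")
        print(input_tokens, output_tokens, repr(text))  # 0 0 'Hello'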
{beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/core/utils.py

@@ -46,7 +46,10 @@ class BaseAPI:
 before_v1 = ""
 self.base_url: str = urlunparse(parsed_url[:2] + ("",) + ("",) * 3)
 self.v1_url: str = urlunparse(parsed_url[:2]+ (before_v1,) + ("",) * 3)
-
+if "v1/messages" in parsed_url.path:
+self.v1_models: str = urlunparse(parsed_url[:2] + ("v1/models",) + ("",) * 3)
+else:
+self.v1_models: str = urlunparse(parsed_url[:2] + (before_v1 + "models",) + ("",) * 3)
 self.chat_url: str = urlunparse(parsed_url[:2] + (before_v1 + "chat/completions",) + ("",) * 3)
 self.image_url: str = urlunparse(parsed_url[:2] + (before_v1 + "images/generations",) + ("",) * 3)
 if parsed_url.hostname == "dashscope.aliyuncs.com":

@@ -192,7 +195,10 @@ def update_initial_model(provider):
 endpoint_models_url = endpoint.v1_models
 if isinstance(api, list):
 api = api[0]
-
+if "v1/messages" in api_url:
+headers = {"x-api-key": api, "anthropic-version": "2023-06-01"}
+else:
+headers = {"Authorization": f"Bearer {api}"}
 response = httpx.get(
 endpoint_models_url,
 headers=headers,
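
The two utils.py hunks make Anthropic-compatible endpoints first-class: both the models URL and the auth headers branch on whether the endpoint path contains "v1/messages". A minimal sketch of the header selection, with illustrative URLs and key values:

    # Sketch of the endpoint-dependent header selection; the URLs are illustrative.
    from urllib.parse import urlparse

    def build_model_list_headers(api_url: str, api_key: str) -> dict:
        # Anthropic-compatible endpoints ("v1/messages") authenticate with x-api-key
        # plus a version header; OpenAI-compatible endpoints use a Bearer token.
        if "v1/messages" in urlparse(api_url).path:
            return {"x-api-key": api_key, "anthropic-version": "2023-06-01"}
        return {"Authorization": f"Bearer {api_key}"}

    print(build_model_list_headers("https://api.anthropic.com/v1/messages", "sk-demo"))
    print(build_model_list_headers("https://api.openai.com/v1/chat/completions", "sk-demo"))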
{beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/models/base.py

@@ -53,20 +53,10 @@ class BaseLLM:
 "https": proxy,
 },
 )
-
-
-
-
-self.aclient = httpx.AsyncClient(
-follow_redirects=True,
-proxies=proxy,
-timeout=timeout,
-)
-else:
-self.aclient = httpx.AsyncClient(
-follow_redirects=True,
-timeout=timeout,
-)
+self._aclient = None
+self._proxy = proxy
+self._timeout = timeout
+self._loop = None
 
 self.conversation: dict[str, list[dict]] = {
 "default": [

@@ -83,6 +73,33 @@ class BaseLLM:
 self.use_plugins = use_plugins
 self.print_log: bool = print_log
 
+def _get_aclient(self):
+"""
+Lazily initialize and return the httpx.AsyncClient.
+This method ensures the client is always bound to a running event loop.
+"""
+import asyncio
+try:
+loop = asyncio.get_running_loop()
+except RuntimeError:
+loop = asyncio.new_event_loop()
+asyncio.set_event_loop(loop)
+
+if self._aclient is None or self._aclient.is_closed or self._loop is not loop:
+self._loop = loop
+proxy = self._proxy or os.environ.get("all_proxy") or os.environ.get("ALL_PROXY") or None
+proxies = proxy if proxy and "socks5h" not in proxy else None
+self._aclient = httpx.AsyncClient(
+follow_redirects=True,
+proxy=proxies,
+timeout=self._timeout,
+)
+return self._aclient
+
+@property
+def aclient(self):
+return self._get_aclient()
+
 def add_to_conversation(
 self,
 message: list,

@@ -196,7 +213,6 @@ class BaseLLM:
 **kwargs,
 ):
 response += chunk
-# full_response: str = "".join([r async for r in response])
 full_response: str = "".join(response)
 return full_response
 
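
base.py stops building an httpx.AsyncClient eagerly in the constructor and instead creates it lazily, rebuilding it whenever it was closed or the surrounding event loop changed. A standalone sketch of that pattern (proxy handling omitted, class and parameter names here are illustrative):

    # Standalone sketch of the lazy, loop-bound client pattern BaseLLM now uses.
    import asyncio
    import httpx

    class LazyClientHolder:
        def __init__(self, timeout: float = 20.0):
            self._aclient = None
            self._loop = None
            self._timeout = timeout

        @property
        def aclient(self) -> httpx.AsyncClient:
            try:
                loop = asyncio.get_running_loop()
            except RuntimeError:
                loop = asyncio.new_event_loop()
                asyncio.set_event_loop(loop)
            # Rebuild the client if it never existed, was closed, or belongs to
            # a different event loop than the one currently running.
            if self._aclient is None or self._aclient.is_closed or self._loop is not loop:
                self._loop = loop
                self._aclient = httpx.AsyncClient(follow_redirects=True, timeout=self._timeout)
            return self._aclient

    async def main():
        holder = LazyClientHolder()
        print(holder.aclient is holder.aclient)  # True: cached for this loop
        await holder.aclient.aclose()

    asyncio.run(main())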
{beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/models/chatgpt.py

@@ -17,7 +17,7 @@ from ..plugins.registry import registry
 from ..plugins import PLUGINS, get_tools_result_async, function_call_list, update_tools_config
 from ..utils.scripts import safe_get, async_generator_to_sync, parse_function_xml, parse_continuous_json, convert_functions_to_xml, remove_xml_tags_and_content
 from ..core.request import prepare_request_payload
-from ..core.response import fetch_response_stream
+from ..core.response import fetch_response_stream, fetch_response
 
 def get_filtered_keys_from_object(obj: object, *keys: str) -> Set[str]:
 """

@@ -288,6 +288,7 @@ class chatgpt(BaseLLM):
 convo_id: str = "default",
 model: str = "",
 pass_history: int = 9999,
+stream: bool = True,
 **kwargs,
 ):
 self.conversation[convo_id][0] = {"role": "system","content": self.system_prompt + "\n\n" + self.get_latest_file_content()}

@@ -309,12 +310,13 @@ class chatgpt(BaseLLM):
 {"role": "system","content": self.system_prompt + "\n\n" + self.get_latest_file_content()},
 {"role": role, "content": prompt}
 ],
-"stream":
-"stream_options": {
-"include_usage": True
-},
+"stream": stream,
 "temperature": kwargs.get("temperature", self.temperature)
 }
+if stream:
+request_data["stream_options"] = {
+"include_usage": True
+}
 
 if kwargs.get("max_tokens", self.max_tokens):
 request_data["max_tokens"] = kwargs.get("max_tokens", self.max_tokens)
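
With the new stream parameter, the request body only carries stream_options when a streaming response is actually requested. A small sketch of that shape; the field values are placeholders, not taken from the package:

    # Illustrative request-body builder: "stream" comes from the new parameter and
    # "stream_options" is only attached when streaming is requested.
    def build_request_data(messages: list, stream: bool = True, temperature: float = 0.5) -> dict:
        request_data = {
            "model": "some-model",          # placeholder model name
            "messages": messages,
            "stream": stream,
            "temperature": temperature,
        }
        if stream:
            request_data["stream_options"] = {"include_usage": True}
        return request_data

    print(build_request_data([{"role": "user", "content": "hi"}], stream=False))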

@@ -674,137 +676,7 @@ class chatgpt(BaseLLM):
 self.conversation[convo_id].pop(-1)
 self.conversation[convo_id].pop(-1)
 
-def
-self,
-prompt: list,
-role: str = "user",
-convo_id: str = "default",
-model: str = "",
-pass_history: int = 9999,
-function_name: str = "",
-total_tokens: int = 0,
-function_arguments: str = "",
-function_call_id: str = "",
-language: str = "English",
-system_prompt: str = None,
-**kwargs,
-):
-"""
-Ask a question (同步流式响应)
-"""
-# 准备会话
-self.system_prompt = system_prompt or self.system_prompt
-if convo_id not in self.conversation or pass_history <= 2:
-self.reset(convo_id=convo_id, system_prompt=system_prompt)
-self.add_to_conversation(prompt, role, convo_id=convo_id, function_name=function_name, total_tokens=total_tokens, function_arguments=function_arguments, function_call_id=function_call_id, pass_history=pass_history)
-
-# 获取请求体
-json_post = None
-async def get_post_body_async():
-nonlocal json_post
-url, headers, json_post, engine_type = await self.get_post_body(prompt, role, convo_id, model, pass_history, **kwargs)
-return url, headers, json_post, engine_type
-
-# 替换原来的获取请求体的代码
-# json_post = next(async_generator_to_sync(get_post_body_async()))
-try:
-url, headers, json_post, engine_type = asyncio.run(get_post_body_async())
-except RuntimeError:
-# 如果已经在事件循环中,则使用不同的方法
-loop = asyncio.get_event_loop()
-url, headers, json_post, engine_type = loop.run_until_complete(get_post_body_async())
-
-self.truncate_conversation(convo_id=convo_id)
-
-# 打印日志
-if self.print_log:
-self.logger.info(f"api_url: {kwargs.get('api_url', self.api_url.chat_url)}, {url}")
-self.logger.info(f"api_key: {kwargs.get('api_key', self.api_key)}")
-
-# 发送请求并处理响应
-for _ in range(3):
-if self.print_log:
-replaced_text = json.loads(re.sub(r';base64,([A-Za-z0-9+/=]+)', ';base64,***', json.dumps(json_post)))
-replaced_text_str = json.dumps(replaced_text, indent=4, ensure_ascii=False)
-self.logger.info(f"Request Body:\n{replaced_text_str}")
-
-try:
-# 改进处理方式,创建一个内部异步函数来处理异步调用
-async def process_async():
-# 异步调用 fetch_response_stream
-# self.logger.info("--------------------------------")
-# self.logger.info(prompt)
-# self.logger.info(parse_function_xml(prompt))
-# self.logger.info(convert_functions_to_xml(parse_function_xml(prompt)))
-# self.logger.info(convert_functions_to_xml(parse_function_xml(prompt)).strip() == prompt)
-# self.logger.info("--------------------------------")
-if prompt and "</" in prompt and "<instructions>" not in prompt and convert_functions_to_xml(parse_function_xml(prompt)).strip() == prompt:
-tmp_response = {
-"id": "chatcmpl-zXCi5TxWy953TCcxFocSienhvx0BB",
-"object": "chat.completion.chunk",
-"created": 1754588695,
-"model": "gemini-2.5-flash",
-"choices": [
-{
-"index": 0,
-"delta": {
-"role": "assistant",
-"content": prompt
-},
-"finish_reason": "stop"
-}
-],
-"system_fingerprint": "fp_d576307f90"
-}
-async def _mock_response_generator():
-yield f"data: {json.dumps(tmp_response)}\n\n"
-async_generator = _mock_response_generator()
-else:
-async_generator = fetch_response_stream(
-self.aclient,
-url,
-headers,
-json_post,
-engine_type,
-model or self.engine,
-)
-# 异步处理响应流
-async for chunk in self._process_stream_response(
-async_generator,
-convo_id=convo_id,
-function_name=function_name,
-total_tokens=total_tokens,
-function_arguments=function_arguments,
-function_call_id=function_call_id,
-model=model,
-language=language,
-system_prompt=system_prompt,
-pass_history=pass_history,
-is_async=True,
-**kwargs
-):
-yield chunk
-
-# 将异步函数转换为同步生成器
-return async_generator_to_sync(process_async())
-except ConnectionError:
-self.logger.error("连接错误,请检查服务器状态或网络连接。")
-return
-except requests.exceptions.ReadTimeout:
-self.logger.error("请求超时,请检查网络连接或增加超时时间。")
-return
-except httpx.RemoteProtocolError:
-continue
-except Exception as e:
-self.logger.error(f"发生了未预料的错误:{e}")
-if "Invalid URL" in str(e):
-e = "您输入了无效的API URL,请使用正确的URL并使用`/start`命令重新设置API URL。具体错误如下:\n\n" + str(e)
-raise Exception(f"{e}")
-# 最后一次重试失败,向上抛出异常
-if _ == 2:
-raise Exception(f"{e}")
-
-async def ask_stream_async(
+async def _ask_stream_handler(
 self,
 prompt: list,
 role: str = "user",

@@ -817,10 +689,11 @@ class chatgpt(BaseLLM):
 function_call_id: str = "",
 language: str = "English",
 system_prompt: str = None,
+stream: bool = True,
 **kwargs,
 ):
 """
-
+Unified stream handler (async)
 """
 # 准备会话
 self.system_prompt = system_prompt or self.system_prompt

@@ -829,89 +702,64 @@ class chatgpt(BaseLLM):
 self.add_to_conversation(prompt, role, convo_id=convo_id, function_name=function_name, total_tokens=total_tokens, function_arguments=function_arguments, pass_history=pass_history, function_call_id=function_call_id)
 
 # 获取请求体
-url, headers, json_post, engine_type = await self.get_post_body(prompt, role, convo_id, model, pass_history, **kwargs)
+url, headers, json_post, engine_type = await self.get_post_body(prompt, role, convo_id, model, pass_history, stream=stream, **kwargs)
 self.truncate_conversation(convo_id=convo_id)
 
 # 打印日志
 if self.print_log:
-self.logger.info(f"api_url: {url}")
+self.logger.info(f"api_url: {kwargs.get('api_url', self.api_url.chat_url)}, {url}")
 self.logger.info(f"api_key: {kwargs.get('api_key', self.api_key)}")
 
 # 发送请求并处理响应
-for
+for i in range(3):
 if self.print_log:
 replaced_text = json.loads(re.sub(r';base64,([A-Za-z0-9+/=]+)', ';base64,***', json.dumps(json_post)))
 replaced_text_str = json.dumps(replaced_text, indent=4, ensure_ascii=False)
 self.logger.info(f"Request Body:\n{replaced_text_str}")
 
 try:
-# 使用fetch_response_stream处理响应
-# self.logger.info("--------------------------------")
-# self.logger.info(prompt)
-# self.logger.info(parse_function_xml(prompt))
-# self.logger.info(convert_functions_to_xml(parse_function_xml(prompt)))
-# self.logger.info(convert_functions_to_xml(parse_function_xml(prompt)).strip() == prompt)
-# self.logger.info("--------------------------------")
 if prompt and "</" in prompt and "<instructions>" not in prompt and convert_functions_to_xml(parse_function_xml(prompt)).strip() == prompt:
 tmp_response = {
 "id": "chatcmpl-zXCi5TxWy953TCcxFocSienhvx0BB",
 "object": "chat.completion.chunk",
 "created": 1754588695,
-"model":
+"model": model or self.engine,
 "choices": [
 {
-
-
-"
-"content": prompt
-},
-"finish_reason": "stop"
+"index": 0,
+"delta": {"role": "assistant", "content": prompt},
+"finish_reason": "stop",
 }
 ],
-"system_fingerprint": "fp_d576307f90"
+"system_fingerprint": "fp_d576307f90",
 }
 async def _mock_response_generator():
 yield f"data: {json.dumps(tmp_response)}\n\n"
 generator = _mock_response_generator()
 else:
-
-
-
-
-
-
-
-
-# if isinstance(chunk, dict) and "error" in chunk:
-# # 处理错误响应
-# if chunk["status_code"] in (400, 422, 503):
-# json_post, should_retry = await self._handle_response_error(
-# type('Response', (), {'status_code': chunk["status_code"], 'text': json.dumps(chunk["details"]), 'aread': lambda: asyncio.sleep(0)}),
-# json_post
-# )
-# if should_retry:
-# break # 跳出内部循环,继续外部循环重试
-# raise Exception(f"{chunk['status_code']} {chunk['error']} {chunk['details']}")
+if stream:
+generator = fetch_response_stream(
+self.aclient, url, headers, json_post, engine_type, model or self.engine,
+)
+else:
+generator = fetch_response(
+self.aclient, url, headers, json_post, engine_type, model or self.engine,
+)
 
 # 处理正常响应
 async for processed_chunk in self._process_stream_response(
-generator,
-
-
-
-function_arguments=function_arguments,
-function_call_id=function_call_id,
-model=model,
-language=language,
-system_prompt=system_prompt,
-pass_history=pass_history,
-is_async=True,
-**kwargs
+generator, convo_id=convo_id, function_name=function_name,
+total_tokens=total_tokens, function_arguments=function_arguments,
+function_call_id=function_call_id, model=model, language=language,
+system_prompt=system_prompt, pass_history=pass_history, is_async=True, **kwargs
 ):
 yield processed_chunk
 
 # 成功处理,跳出重试循环
 break
+except (httpx.ConnectError, httpx.ReadTimeout):
+self.logger.error("连接或读取超时错误,请检查服务器状态或网络连接。")
+return # Stop iteration
 except httpx.RemoteProtocolError:
 continue
 except Exception as e:
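
The reworked handler also tightens error handling around the three-attempt loop: connect and read-timeout errors end the generator, a dropped connection triggers another attempt, and any other error is re-raised on the final attempt. A compact sketch of that control flow; make_stream is a hypothetical stand-in for the real fetch_response(_stream) call:

    # Compact sketch of the retry shape used by the unified stream handler.
    import asyncio
    import httpx

    async def stream_with_retries(make_stream):
        for i in range(3):
            try:
                async for chunk in make_stream():
                    yield chunk
                break                                   # success: stop retrying
            except (httpx.ConnectError, httpx.ReadTimeout):
                return                                  # give up: end the generator
            except httpx.RemoteProtocolError:
                continue                                # dropped stream: try again
            except Exception:
                if i == 2:                              # last attempt: surface the error
                    raise

    async def fake_stream():
        yield "hello "
        yield "world"

    async def main():
        async for piece in stream_with_retries(fake_stream):
            print(piece, end="")
        print()

    asyncio.run(main())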

@@ -922,9 +770,69 @@ class chatgpt(BaseLLM):
 e = "您输入了无效的API URL,请使用正确的URL并使用`/start`命令重新设置API URL。具体错误如下:\n\n" + str(e)
 raise Exception(f"{e}")
 # 最后一次重试失败,向上抛出异常
-if
+if i == 2:
 raise Exception(f"{e}")
 
+def ask_stream(
+self,
+prompt: list,
+role: str = "user",
+convo_id: str = "default",
+model: str = "",
+pass_history: int = 9999,
+function_name: str = "",
+total_tokens: int = 0,
+function_arguments: str = "",
+function_call_id: str = "",
+language: str = "English",
+system_prompt: str = None,
+stream: bool = True,
+**kwargs,
+):
+"""
+Ask a question (同步流式响应)
+"""
+try:
+loop = asyncio.get_event_loop()
+if loop.is_closed():
+loop = asyncio.new_event_loop()
+asyncio.set_event_loop(loop)
+except RuntimeError:
+loop = asyncio.new_event_loop()
+asyncio.set_event_loop(loop)
+
+async_gen = self._ask_stream_handler(
+prompt, role, convo_id, model, pass_history, function_name, total_tokens,
+function_arguments, function_call_id, language, system_prompt, stream, **kwargs
+)
+for chunk in async_generator_to_sync(async_gen):
+yield chunk
+
+async def ask_stream_async(
+self,
+prompt: list,
+role: str = "user",
+convo_id: str = "default",
+model: str = "",
+pass_history: int = 9999,
+function_name: str = "",
+total_tokens: int = 0,
+function_arguments: str = "",
+function_call_id: str = "",
+language: str = "English",
+system_prompt: str = None,
+stream: bool = True,
+**kwargs,
+):
+"""
+Ask a question (异步流式响应)
+"""
+async for chunk in self._ask_stream_handler(
+prompt, role, convo_id, model, pass_history, function_name, total_tokens,
+function_arguments, function_call_id, language, system_prompt, stream, **kwargs
+):
+yield chunk
+
 async def ask_async(
 self,
 prompt: str,
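
The new synchronous ask_stream is a thin wrapper that drives _ask_stream_handler through async_generator_to_sync on a private event loop. The bridge below is a simplified stand-in for that helper, shown only to illustrate the mechanism, not the package's implementation:

    # Minimal stand-in for aient's async_generator_to_sync helper.
    import asyncio

    def async_gen_to_sync(async_gen):
        loop = asyncio.new_event_loop()
        try:
            while True:
                try:
                    # Pull one item at a time out of the async generator.
                    yield loop.run_until_complete(async_gen.__anext__())
                except StopAsyncIteration:
                    break
        finally:
            loop.close()

    async def fake_ask_stream_handler(prompt: str):
        for word in prompt.split():
            yield word + " "

    for chunk in async_gen_to_sync(fake_ask_stream_handler("hello from the sync side")):
        print(chunk, end="")
    print()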

@@ -943,11 +851,36 @@ class chatgpt(BaseLLM):
 convo_id=convo_id,
 pass_history=pass_history,
 model=model or self.engine,
+stream=False,
 **kwargs,
 )
 full_response: str = "".join([r async for r in response])
 return full_response
 
+def ask(
+self,
+prompt: str,
+role: str = "user",
+convo_id: str = "default",
+model: str = "",
+pass_history: int = 9999,
+**kwargs,
+) -> str:
+"""
+Non-streaming ask
+"""
+response = self.ask_stream(
+prompt=prompt,
+role=role,
+convo_id=convo_id,
+pass_history=pass_history,
+model=model or self.engine,
+stream=False,
+**kwargs,
+)
+full_response: str = "".join([r for r in response])
+return full_response
+
 def rollback(self, n: int = 1, convo_id: str = "default") -> None:
 """
 Rollback the conversation
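
Together with the stream flag, the new ask() gives the class a fully synchronous, non-streaming entry point alongside ask_stream and ask_stream_async. Hypothetical usage, modeled on the removed beswarm/aient/main.py example and assuming the bundled aient package is importable as aient with the same constructor arguments; MODEL, API_KEY and BASE_URL are expected in the environment:

    import os
    from aient.models import chatgpt

    bot = chatgpt(
        api_key=os.environ.get("API_KEY"),
        api_url=os.environ.get("BASE_URL"),
        engine=os.environ.get("MODEL"),
    )

    # Synchronous streaming: yields text chunks as they arrive.
    for chunk in bot.ask_stream("hi"):
        print(chunk, end="")
    print()

    # Synchronous, non-streaming: the new ask() joins the chunks into one string.
    print(bot.ask("hi"))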

{beswarm-0.2.51 → beswarm-0.2.52}/beswarm/aient/aient/utils/scripts.py

@@ -212,6 +212,8 @@ def async_generator_to_sync(async_gen):
 # 清理所有待处理的任务
 tasks = [t for t in asyncio.all_tasks(loop) if not t.done()]
 if tasks:
+for task in tasks:
+task.cancel()
 loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))
 loop.run_until_complete(loop.shutdown_asyncgens())
 loop.close()
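
The one behavioral change here: pending tasks are cancelled before the cleanup code gathers them, so shutdown no longer waits on work that would never finish. A standalone sketch of the cleanup routine, with a deliberately long-running task to show the effect:

    # Standalone sketch of the cleanup change in async_generator_to_sync.
    import asyncio

    def drain_and_close(loop: asyncio.AbstractEventLoop) -> None:
        tasks = [t for t in asyncio.all_tasks(loop) if not t.done()]
        if tasks:
            for task in tasks:
                task.cancel()                           # the step added in 0.2.52
            loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))
        loop.run_until_complete(loop.shutdown_asyncgens())
        loop.close()

    loop = asyncio.new_event_loop()
    loop.create_task(asyncio.sleep(3600))               # a task that would otherwise hang
    drain_and_close(loop)
    print("loop closed:", loop.is_closed())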

{beswarm-0.2.51 → beswarm-0.2.52}/beswarm.egg-info/SOURCES.txt

@@ -15,7 +15,6 @@ beswarm.egg-info/requires.txt
 beswarm.egg-info/top_level.txt
 beswarm/agents/chatgroup.py
 beswarm/agents/planact.py
-beswarm/aient/main.py
 beswarm/aient/aient/__init__.py
 beswarm/aient/aient/core/__init__.py
 beswarm/aient/aient/core/log_config.py

beswarm-0.2.51/beswarm/aient/main.py (deleted)

@@ -1,15 +0,0 @@
-import os
-
-from aient.utils import prompt
-from aient.models import chatgpt
-GPT_ENGINE = os.environ.get('MODEL')
-
-API = os.environ.get('API_KEY')
-API_URL = os.environ.get('BASE_URL', None)
-
-message = "hi"
-systemprompt = os.environ.get('SYSTEMPROMPT', prompt.chatgpt_system_prompt)
-
-bot = chatgpt(api_key=API, api_url=API_URL , engine=GPT_ENGINE, system_prompt=systemprompt)
-for text in bot.ask_stream(message):
-print(text, end="")

All remaining files listed above are unchanged between 0.2.51 and 0.2.52.