beswarm 0.2.33__py3-none-any.whl → 0.2.35__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of beswarm might be problematic.

beswarm/aient/setup.py CHANGED
@@ -4,7 +4,7 @@ from setuptools import setup, find_packages
 
 setup(
     name="aient",
-    version="1.1.51",
+    version="1.1.53",
     description="Aient: The Awakening of Agent.",
     long_description=Path.open(Path("README.md"), encoding="utf-8").read(),
     long_description_content_type="text/markdown",
beswarm/aient/src/aient/core/request.py CHANGED
@@ -3,6 +3,8 @@ import json
 import httpx
 import base64
 import urllib.parse
+from io import IOBase
+from typing import Tuple
 
 from .models import RequestModel, Message
 from .utils import (
@@ -72,9 +74,8 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
                     content.append(image_message)
         elif msg.content:
             content = [{"text": msg.content}]
-            tool_calls = msg.tool_calls
         elif msg.content is None:
-            continue
+            tool_calls = msg.tool_calls
 
         if tool_calls:
             tool_call = tool_calls[0]
@@ -108,7 +109,7 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
                     }]
                 }
             )
-        elif msg.role != "system":
+        elif msg.role != "system" and content:
            messages.append({"role": msg.role, "parts": content})
        elif msg.role == "system":
            content[0]["text"] = re.sub(r"_+", "_", content[0]["text"])
@@ -239,7 +240,6 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
         ]
 
     if "gemini-2.5" in original_model:
-        generation_config = payload.get("generationConfig", {})
         # Detect the thinking-budget setting from the requested model name
         m = re.match(r".*-think-(-?\d+)", request.model)
         if m:
@@ -254,7 +254,7 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
                         budget = 32768
                     else:  # 128 <= val <= 32768
                         budget = val
-
+
                 # gemini-2.5-flash-lite: [0] or [512, 24576]
                 elif "gemini-2.5-flash-lite" in original_model:
                     if val > 0 and val < 512:
@@ -270,9 +270,9 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
                         budget = 24576
                 else:  # Includes 0 and valid range, and clamps invalid negatives
                     budget = val if val >= 0 else 0
-
+
                 payload["generationConfig"]["thinkingConfig"] = {
-                    "includeThoughts": True,
+                    "includeThoughts": True if budget else False,
                     "thinkingBudget": budget
                 }
             except ValueError:
@@ -282,11 +282,6 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
             payload["generationConfig"]["thinkingConfig"] = {
                 "includeThoughts": True,
             }
-        payload["generationConfig"] = generation_config
-
-        # # Detect the search tag
-        # if request.model.endswith("-search"):
-        #     payload["tools"] = [{"googleSearch": {}}]
 
     if safe_get(provider, "preferences", "post_body_parameter_overrides", default=None):
         for key, value in safe_get(provider, "preferences", "post_body_parameter_overrides", default={}).items():
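
Taken together, the two thinking-budget hunks implement a clamp per model family plus a toggle: includeThoughts is now only enabled when a non-zero budget survives clamping. A minimal standalone sketch of that logic (the helper names clamp_thinking_budget and thinking_config are illustrative; the released code inlines this inside get_gemini_payload):

import re

def clamp_thinking_budget(original_model: str, requested_model: str):
    """Parse a '-think-N' suffix and clamp N to the model's allowed range."""
    m = re.match(r".*-think-(-?\d+)", requested_model)
    if not m:
        return None
    val = int(m.group(1))
    if "gemini-2.5-flash-lite" in original_model:
        # flash-lite accepts 0 (thinking off) or 512..24576
        if 0 < val < 512:
            val = 512
        return min(max(val, 0), 24576)
    # pro/flash tiers accept roughly 128..32768
    return min(max(val, 128), 32768)

def thinking_config(budget: int) -> dict:
    # Mirrors the change from "includeThoughts": True to
    # "includeThoughts": True if budget else False
    return {"includeThoughts": bool(budget), "thinkingBudget": budget}

# thinking_config(clamp_thinking_budget("gemini-2.5-flash-lite", "gemini-2.5-flash-lite-think-100"))
#   -> {"includeThoughts": True, "thinkingBudget": 512}
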
@@ -372,16 +367,12 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
     # search_tool = None
 
     # https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini/2-0-flash?hl=zh-cn
-    pro_models = ["gemini-2.5", "gemini-2.0"]
+    pro_models = ["gemini-2.5"]
     if any(pro_model in original_model for pro_model in pro_models):
-        location = gemini2
+        location = gemini2_5_pro_exp
     else:
         location = gemini1
 
-    if "gemini-2.5-flash-lite-preview-06-17" == original_model or \
-        "gemini-2.5-pro-preview-06-05" == original_model:
-        location = gemini2_5_pro_exp
-
     if "google-vertex-ai" in provider.get("base_url", ""):
         url = provider.get("base_url").rstrip('/') + "/v1/projects/{PROJECT_ID}/locations/{LOCATION}/publishers/google/models/{MODEL_ID}:{stream}".format(
             LOCATION=await location.next(),
@@ -390,24 +381,8 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
             stream=gemini_stream
         )
     elif api_key is not None and api_key[2] == ".":
-        if provider.get("project_id") and "gemini-2.5-pro-preview-06-05" == original_model:
-            if isinstance(provider.get("project_id"), list):
-                api_key_index = provider.get("api").index(api_key)
-                project_id = provider.get("project_id")[api_key_index]
-            else:
-                project_id = provider.get("project_id")
-            url = f"https://aiplatform.googleapis.com/v1/projects/{project_id}/locations/global/publishers/google/models/{original_model}:{gemini_stream}?key={api_key}"
-        else:
-            url = f"https://aiplatform.googleapis.com/v1/publishers/google/models/{original_model}:{gemini_stream}?key={api_key}"
+        url = f"https://aiplatform.googleapis.com/v1/publishers/google/models/{original_model}:{gemini_stream}?key={api_key}"
         headers.pop("Authorization", None)
-    elif "gemini-2.5-flash-lite-preview-06-17" == original_model or \
-        "gemini-2.5-pro-preview-06-05" == original_model:
-        url = "https://aiplatform.googleapis.com/v1/projects/{PROJECT_ID}/locations/{LOCATION}/publishers/google/models/{MODEL_ID}:{stream}".format(
-            LOCATION=await location.next(),
-            PROJECT_ID=project_id,
-            MODEL_ID=original_model,
-            stream=gemini_stream
-        )
     else:
         url = "https://{LOCATION}-aiplatform.googleapis.com/v1/projects/{PROJECT_ID}/locations/{LOCATION}/publishers/google/models/{MODEL_ID}:{stream}".format(
             LOCATION=await location.next(),
@@ -433,8 +408,9 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
                 elif item.type == "image_url" and provider.get("image", True):
                     image_message = await get_image_message(item.image_url.url, engine)
                     content.append(image_message)
-        else:
+        elif msg.content:
             content = [{"text": msg.content}]
+        elif msg.content is None:
             tool_calls = msg.tool_calls
 
         if tool_calls:
@@ -469,7 +445,7 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
                     }]
                 }
             )
-        elif msg.role != "system":
+        elif msg.role != "system" and content:
            messages.append({"role": msg.role, "parts": content})
        elif msg.role == "system":
            system_prompt = system_prompt + "\n\n" + content[0]["text"]
@@ -573,7 +549,7 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
                         budget = 32768
                     else:  # 128 <= val <= 32768
                         budget = val
-
+
                 # gemini-2.5-flash-lite: [0] or [512, 24576]
                 elif "gemini-2.5-flash-lite" in original_model:
                     if val > 0 and val < 512:
@@ -589,9 +565,9 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
                         budget = 24576
                 else:  # Includes 0 and valid range, and clamps invalid negatives
                     budget = val if val >= 0 else 0
-
+
                 payload["generationConfig"]["thinkingConfig"] = {
-                    "includeThoughts": True,
+                    "includeThoughts": True if budget else False,
                     "thinkingBudget": budget
                 }
             except ValueError:
@@ -602,9 +578,6 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
                 "includeThoughts": True,
             }
 
-    # if request.model.endswith("-search"):
-    #     payload["tools"] = [search_tool]
-
     if safe_get(provider, "preferences", "post_body_parameter_overrides", default=None):
         for key, value in safe_get(provider, "preferences", "post_body_parameter_overrides", default={}).items():
             if key == request.model:
@@ -1822,21 +1795,98 @@ async def get_dalle_payload(request, engine, provider, api_key=None):
 
     return url, headers, payload
 
+async def get_upload_certificate(client: httpx.AsyncClient, api_key: str, model: str) -> dict:
+    """Step 1: obtain the file-upload certificate."""
+    # print("Step 1: fetching the upload certificate...")
+    headers = {"Authorization": f"Bearer {api_key}"}
+    params = {"action": "getPolicy", "model": model}
+    try:
+        response = await client.get("https://dashscope.aliyuncs.com/api/v1/uploads", headers=headers, params=params)
+        response.raise_for_status()  # raise if the request failed
+        cert_data = response.json()
+        # print("Certificate obtained.")
+        return cert_data.get("data")
+    except httpx.HTTPStatusError as e:
+        print(f"Failed to obtain certificate: HTTP {e.response.status_code}")
+        print(f"Response body: {e.response.text}")
+        return None
+    except Exception as e:
+        print(f"Unknown error while obtaining certificate: {e}")
+        return None
+
+from mimetypes import guess_type
+
+async def upload_file_to_oss(client: httpx.AsyncClient, certificate: dict, file: Tuple[str, IOBase, str]) -> str:
+    """Step 2: upload the file content to OSS using the certificate."""
+    upload_host = certificate.get("upload_host")
+    upload_dir = certificate.get("upload_dir")
+    object_key = f"{upload_dir}/{file[0]}"
+
+    form_data = {
+        "key": object_key,
+        "policy": certificate.get("policy"),
+        "OSSAccessKeyId": certificate.get("oss_access_key_id"),
+        "signature": certificate.get("signature"),
+        "success_action_status": "200",
+        "x-oss-object-acl": certificate.get("x_oss_object_acl"),
+        "x-oss-forbid-overwrite": certificate.get("x_oss_forbid_overwrite"),
+    }
+
+    files = {"file": file}
+
+    try:
+        response = await client.post(upload_host, data=form_data, files=files, timeout=3600)
+        response.raise_for_status()
+        # print("File uploaded successfully!")
+        oss_url = f"oss://{object_key}"
+        # print(f"File OSS URL: {oss_url}")
+        return oss_url
+    except httpx.HTTPStatusError as e:
+        print(f"File upload failed: HTTP {e.response.status_code}")
+        print(f"Response body: {e.response.text}")
+        return None
+    except Exception as e:
+        print(f"Unknown error while uploading file: {e}")
+        return None
+
 async def get_whisper_payload(request, engine, provider, api_key=None):
     model_dict = get_model_dict(provider)
     original_model = model_dict[request.model]
-    headers = {
-        # "Content-Type": "multipart/form-data",
-    }
+    headers = {}
     if api_key:
         headers['Authorization'] = f"Bearer {api_key}"
     url = provider['base_url']
     url = BaseAPI(url).audio_transcriptions
 
-    payload = {
-        "model": original_model,
-        "file": request.file,
-    }
+    if "dashscope.aliyuncs.com" in url:
+        client = httpx.AsyncClient()
+        certificate = await get_upload_certificate(client, api_key, original_model)
+        if not certificate:
+            return
+
+        # Step 2: upload the file
+        oss_url = await upload_file_to_oss(client, certificate, request.file)
+        headers = {
+            "Authorization": f"Bearer {api_key}",
+            "Content-Type": "application/json",
+            "X-DashScope-OssResourceResolve": "enable"
+        }
+        payload = {
+            "model": original_model,
+            "input": {
+                "messages": [
+                    {
+                        "role": "user",
+                        "content": [{"audio": oss_url}]
+                    }
+                ]
+            }
+        }
+    else:
+        payload = {
+            "model": original_model,
+            "file": request.file,
+        }
 
     if request.prompt:
         payload["prompt"] = request.prompt
@@ -1906,11 +1956,20 @@ async def get_tts_payload(request, engine, provider, api_key=None):
     url = provider['base_url']
     url = BaseAPI(url).audio_speech
 
-    payload = {
-        "model": original_model,
-        "input": request.input,
-        "voice": request.voice,
-    }
+    if "api.minimaxi.com" in url:
+        payload = {
+            "model": original_model,
+            "text": request.input,
+            "voice_setting": {
+                "voice_id": request.voice
+            }
+        }
+    else:
+        payload = {
+            "model": original_model,
+            "input": request.input,
+            "voice": request.voice,
+        }
 
     if request.response_format:
         payload["response_format"] = request.response_format
beswarm/aient/src/aient/core/response.py CHANGED
@@ -20,6 +20,34 @@ async def check_response(response, error_log):
         return {"error": f"{error_log} HTTP Error", "status_code": response.status_code, "details": error_json}
     return None
 
+def gemini_json_poccess(response_str):
+    promptTokenCount = 0
+    candidatesTokenCount = 0
+    totalTokenCount = 0
+    image_base64 = None
+
+    response_json = json.loads(response_str)
+    json_data = safe_get(response_json, "candidates", 0, "content", default=None)
+    finishReason = safe_get(response_json, "candidates", 0, "finishReason", default=None)
+    if finishReason:
+        promptTokenCount = safe_get(response_json, "usageMetadata", "promptTokenCount", default=0)
+        candidatesTokenCount = safe_get(response_json, "usageMetadata", "candidatesTokenCount", default=0)
+        totalTokenCount = safe_get(response_json, "usageMetadata", "totalTokenCount", default=0)
+
+    content = safe_get(json_data, "parts", 0, "text", default="")
+    b64_json = safe_get(json_data, "parts", 0, "inlineData", "data", default="")
+    if b64_json:
+        image_base64 = b64_json
+
+    is_thinking = safe_get(json_data, "parts", 0, "thought", default=False)
+
+    function_call_name = safe_get(json_data, "functionCall", "name", default=None)
+    function_full_response = json.dumps(safe_get(json_data, "functionCall", "args", default=""))
+
+    blockReason = safe_get(json_data, 0, "promptFeedback", "blockReason", default=None)
+
+    return is_thinking, content, image_base64, function_call_name, function_full_response, blockReason, promptTokenCount, candidatesTokenCount, totalTokenCount
+
 async def fetch_gemini_response_stream(client, url, headers, payload, model):
     timestamp = int(datetime.timestamp(datetime.now()))
     async with client.stream('POST', url, headers=headers, json=payload) as response:
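
gemini_json_poccess leans entirely on safe_get to probe optional paths in a candidate chunk, so absent keys degrade to defaults instead of raising. Assuming safe_get walks a mix of dict keys and list indices (the package's own implementation lives in utils.py), a minimal sketch of such a helper:

def safe_get(data, *keys, default=None):
    """Walk nested dicts/lists by key or index; return default on any miss."""
    for key in keys:
        try:
            data = data[key]
        except (KeyError, IndexError, TypeError):
            return default
    return data

# safe_get({"candidates": [{"content": {"parts": [{"text": "hi"}]}}]},
#          "candidates", 0, "content", "parts", 0, "text", default="")  -> "hi"
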
@@ -28,131 +56,54 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model):
             yield error_message
             return
         buffer = ""
-        cache_buffer = ""
-        revicing_function_call = False
-        function_full_response = "{"
-        need_function_call = False
-        is_finish = False
         promptTokenCount = 0
         candidatesTokenCount = 0
         totalTokenCount = 0
         parts_json = ""
-        image_base64 = ""
-        # line_index = 0
-        # last_text_line = 0
-        # if "thinking" in model:
-        #     is_thinking = True
-        # else:
-        #     is_thinking = False
         async for chunk in response.aiter_text():
             buffer += chunk
             cache_buffer += chunk
 
             while "\n" in buffer:
                 line, buffer = buffer.split("\n", 1)
-                # line_index += 1
                 if line.startswith("data: "):
-                    json_line = line.lstrip("data: ").strip()
-                    response_json = json.loads(json_line)
-                    json_data = safe_get(response_json, "candidates", 0, "content", default=None)
-                    finishReason = safe_get(response_json, "candidates", 0, "finishReason", default=None)
-                    if finishReason:
-                        promptTokenCount = safe_get(response_json, "usageMetadata", "promptTokenCount", default=0)
-                        candidatesTokenCount = safe_get(response_json, "usageMetadata", "candidatesTokenCount", default=0)
-                        totalTokenCount = safe_get(response_json, "usageMetadata", "totalTokenCount", default=0)
-
-                    content = safe_get(json_data, "parts", 0, "text", default="")
-                    b64_json = safe_get(json_data, "parts", 0, "inlineData", "data", default="")
-                    if b64_json:
-                        image_base64 = b64_json
-
-                    is_thinking = safe_get(json_data, "parts", 0, "thought", default=False)
-                    if is_thinking:
-                        sse_string = await generate_sse_response(timestamp, model, reasoning_content=content)
-                        yield sse_string
-                    elif not image_base64 and content:
-                        sse_string = await generate_sse_response(timestamp, model, content=content)
-                        yield sse_string
-
-                    continue
-
-                # https://ai.google.dev/api/generate-content?hl=zh-cn#FinishReason
-                if line and '\"finishReason\": \"' in line:
-                    if "stop" not in line.lower():
-                        logger.error(f"finishReason: {line}")
-                    is_finish = True
-                if is_finish and '\"promptTokenCount\": ' in line:
-                    json_data = parse_json_safely("{" + line + "}")
-                    promptTokenCount = json_data.get('promptTokenCount', 0)
-                if is_finish and '\"candidatesTokenCount\": ' in line:
-                    json_data = parse_json_safely("{" + line + "}")
-                    candidatesTokenCount = json_data.get('candidatesTokenCount', 0)
-                if is_finish and '\"totalTokenCount\": ' in line:
-                    json_data = parse_json_safely("{" + line + "}")
-                    totalTokenCount = json_data.get('totalTokenCount', 0)
-
-                if (line and '"parts": [' in line or parts_json != "") and is_finish == False:
+                    parts_json = line.lstrip("data: ").strip()
+                else:
                     parts_json += line
-                if parts_json != "" and line and '],' == line.strip():
-                    # tmp_parts_json = "{" + parts_json.split("} ] },")[0].strip().rstrip("}], ").replace("\n", "\\n").lstrip("{") + "}]}"
-                    tmp_parts_json = "{" + parts_json.split("} ] },")[0].strip().rstrip("}], ").replace("\n", "\\n").lstrip("{")
-                    if "inlineData" in tmp_parts_json:
-                        tmp_parts_json = tmp_parts_json + "}}]}"
-                    else:
-                        tmp_parts_json = tmp_parts_json + "}]}"
+                    parts_json = parts_json.lstrip("[,")
                 try:
-                    json_data = json.loads(tmp_parts_json)
-
-                    content = safe_get(json_data, "parts", 0, "text", default="")
-                    b64_json = safe_get(json_data, "parts", 0, "inlineData", "data", default="")
-                    if b64_json:
-                        image_base64 = b64_json
-
-                    is_thinking = safe_get(json_data, "parts", 0, "thought", default=False)
-                    if is_thinking:
-                        sse_string = await generate_sse_response(timestamp, model, reasoning_content=content)
-                        yield sse_string
-                    elif not image_base64 and content:
-                        sse_string = await generate_sse_response(timestamp, model, content=content)
-                        yield sse_string
+                    json.loads(parts_json)
                 except json.JSONDecodeError:
-                    logger.error(f"Failed to parse JSON: {parts_json}")
-                    parts_json = ""
-
-                if line and ('\"functionCall\": {' in line or revicing_function_call):
-                    revicing_function_call = True
-                    need_function_call = True
-                    if ']' in line:
-                        revicing_function_call = False
                     continue
 
-                function_full_response += line
+                # https://ai.google.dev/api/generate-content?hl=zh-cn#FinishReason
+                is_thinking, content, image_base64, function_call_name, function_full_response, blockReason, promptTokenCount, candidatesTokenCount, totalTokenCount = gemini_json_poccess(parts_json)
 
-        if image_base64:
-            yield await generate_no_stream_response(timestamp, model, content=content, tools_id=None, function_call_name=None, function_call_content=None, role=None, total_tokens=totalTokenCount, prompt_tokens=promptTokenCount, completion_tokens=candidatesTokenCount, image_base64=image_base64)
-            return
+                if is_thinking:
+                    sse_string = await generate_sse_response(timestamp, model, reasoning_content=content)
+                    yield sse_string
+                elif not image_base64 and content:
+                    sse_string = await generate_sse_response(timestamp, model, content=content)
+                    yield sse_string
 
-        if need_function_call:
-            function_call = json.loads(function_full_response)
-            function_call_name = function_call["functionCall"]["name"]
-            sse_string = await generate_sse_response(timestamp, model, content=None, tools_id="chatcmpl-9inWv0yEtgn873CxMBzHeCeiHctTV", function_call_name=function_call_name)
-            yield sse_string
-            function_full_response = json.dumps(function_call["functionCall"]["args"])
-            sse_string = await generate_sse_response(timestamp, model, content=None, tools_id="chatcmpl-9inWv0yEtgn873CxMBzHeCeiHctTV", function_call_name=None, function_call_content=function_full_response)
-            yield sse_string
+                if image_base64:
+                    yield await generate_no_stream_response(timestamp, model, content=content, tools_id=None, function_call_name=None, function_call_content=None, role=None, total_tokens=totalTokenCount, prompt_tokens=promptTokenCount, completion_tokens=candidatesTokenCount, image_base64=image_base64)
 
-        cache_buffer_json = {}
-        try:
-            cache_buffer_json = json.loads(cache_buffer)
-        except json.JSONDecodeError:
-            cache_buffer_json = {}
+                if function_call_name:
+                    sse_string = await generate_sse_response(timestamp, model, content=None, tools_id="chatcmpl-9inWv0yEtgn873CxMBzHeCeiHctTV", function_call_name=function_call_name)
+                    yield sse_string
+                if function_full_response:
+                    sse_string = await generate_sse_response(timestamp, model, content=None, tools_id="chatcmpl-9inWv0yEtgn873CxMBzHeCeiHctTV", function_call_name=None, function_call_content=function_full_response)
+                    yield sse_string
 
-        if cache_buffer == "[]" or safe_get(cache_buffer_json, 0, "promptFeedback", "blockReason") == "PROHIBITED_CONTENT":
-            sse_string = await generate_sse_response(timestamp, model, stop="PROHIBITED_CONTENT")
-            yield sse_string
-        else:
-            sse_string = await generate_sse_response(timestamp, model, stop="stop")
-            yield sse_string
+                if parts_json == "[]" or blockReason == "PROHIBITED_CONTENT":
+                    sse_string = await generate_sse_response(timestamp, model, stop="PROHIBITED_CONTENT")
+                    yield sse_string
+                else:
+                    sse_string = await generate_sse_response(timestamp, model, stop="stop")
+                    yield sse_string
+
+                parts_json = ""
 
         sse_string = await generate_sse_response(timestamp, model, None, None, None, None, None, totalTokenCount, promptTokenCount, candidatesTokenCount)
         yield sse_string
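
The rewritten stream loop drops the old per-line heuristics (finishReason string matching, manual brace surgery) for a simpler contract: every "data: " line starts a JSON event, continuation lines are appended until the accumulated string parses, and each parsed event goes through gemini_json_poccess. A self-contained sketch of that buffering pattern over an iterable of text chunks:

import json

def iter_sse_json(chunks):
    """Yield parsed JSON events from 'data: ...' SSE lines, tolerating
    events whose JSON arrives split across several text chunks."""
    buffer = ""
    event = ""
    for chunk in chunks:
        buffer += chunk
        while "\n" in buffer:
            line, buffer = buffer.split("\n", 1)
            if line.startswith("data: "):
                event = line[len("data: "):].strip()  # new event starts
            else:
                event += line                          # continuation line
                event = event.lstrip("[,")             # strip array framing
            try:
                parsed = json.loads(event)
            except json.JSONDecodeError:
                continue                               # incomplete, keep buffering
            yield parsed
            event = ""

# list(iter_sse_json(['data: {"a"', ': 1}\n', 'data: {"b": 2}\n']))
#   -> [{'a': 1}, {'b': 2}]
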
@@ -666,6 +617,10 @@ async def fetch_response(client, url, headers, payload, engine, model):
 
         yield response_json
 
+    elif "dashscope.aliyuncs.com" in url and "multimodal-generation" in url:
+        response_json = response.json()
+        content = safe_get(response_json, "output", "choices", 0, "message", "content", 0, default=None)
+        yield content
     else:
         response_json = response.json()
         yield response_json
beswarm/aient/src/aient/core/utils.py CHANGED
@@ -49,10 +49,16 @@ class BaseAPI:
         self.v1_models: str = urlunparse(parsed_url[:2] + (before_v1 + "models",) + ("",) * 3)
         self.chat_url: str = urlunparse(parsed_url[:2] + (before_v1 + "chat/completions",) + ("",) * 3)
         self.image_url: str = urlunparse(parsed_url[:2] + (before_v1 + "images/generations",) + ("",) * 3)
-        self.audio_transcriptions: str = urlunparse(parsed_url[:2] + (before_v1 + "audio/transcriptions",) + ("",) * 3)
+        if parsed_url.hostname == "dashscope.aliyuncs.com":
+            self.audio_transcriptions: str = urlunparse(parsed_url[:2] + ("/api/v1/services/aigc/multimodal-generation/generation",) + ("",) * 3)
+        else:
+            self.audio_transcriptions: str = urlunparse(parsed_url[:2] + (before_v1 + "audio/transcriptions",) + ("",) * 3)
         self.moderations: str = urlunparse(parsed_url[:2] + (before_v1 + "moderations",) + ("",) * 3)
         self.embeddings: str = urlunparse(parsed_url[:2] + (before_v1 + "embeddings",) + ("",) * 3)
-        self.audio_speech: str = urlunparse(parsed_url[:2] + (before_v1 + "audio/speech",) + ("",) * 3)
+        if parsed_url.hostname == "api.minimaxi.com":
+            self.audio_speech: str = urlunparse(parsed_url[:2] + ("v1/t2a_v2",) + ("",) * 3)
+        else:
+            self.audio_speech: str = urlunparse(parsed_url[:2] + (before_v1 + "audio/speech",) + ("",) * 3)
 
         if parsed_url.hostname == "generativelanguage.googleapis.com":
             self.base_url = api_url
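
The endpoint overrides above are plain urllib.parse surgery: keep the scheme and host of the configured base_url and swap in a provider-specific path. A simplified sketch of the pattern (the real BaseAPI also derives a path prefix, before_v1, from the configured URL):

from urllib.parse import urlparse, urlunparse

def audio_transcriptions_url(base_url: str) -> str:
    parsed = urlparse(base_url)
    if parsed.hostname == "dashscope.aliyuncs.com":
        path = "/api/v1/services/aigc/multimodal-generation/generation"
    else:
        path = "/v1/audio/transcriptions"  # OpenAI-style default
    # urlunparse takes (scheme, netloc, path, params, query, fragment)
    return urlunparse(parsed[:2] + (path,) + ("",) * 3)

# audio_transcriptions_url("https://dashscope.aliyuncs.com/compatible-mode/v1")
#   -> "https://dashscope.aliyuncs.com/api/v1/services/aigc/multimodal-generation/generation"
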
@@ -440,8 +446,23 @@ c4 = ThreadSafeCircularList(["us-east5", "us-central1", "europe-west4", "asia-so
 c3h = ThreadSafeCircularList(["us-east5", "us-central1", "europe-west1", "europe-west4"])
 gemini1 = ThreadSafeCircularList(["us-central1", "us-east4", "us-west1", "us-west4", "europe-west1", "europe-west2"])
 gemini2 = ThreadSafeCircularList(["us-central1"])
-gemini2_5_pro_exp = ThreadSafeCircularList(["global"])
-
+# gemini2_5_pro_exp = ThreadSafeCircularList(["global"])
+gemini2_5_pro_exp = ThreadSafeCircularList([
+    "us-central1",
+    "us-east1",
+    "us-east4",
+    "us-east5",
+    "us-south1",
+    "us-west1",
+    "us-west4",
+    "europe-central2",
+    "europe-north1",
+    "europe-southwest1",
+    "europe-west1",
+    "europe-west4",
+    "europe-west8",
+    "europe-west9"
+])
 
 
 # end_of_line = "\n\r\n"
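
location.next() is awaited in the URL builders, so ThreadSafeCircularList presumably hands out regions round-robin under an async lock, spreading Vertex requests across the newly expanded region pool. A minimal sketch of that behavior (an assumed equivalent, not the package's exact class):

import asyncio

class RoundRobinList:
    """Async-safe circular iterator: each next() returns the following item."""
    def __init__(self, items):
        self._items = list(items)
        self._index = 0
        self._lock = asyncio.Lock()

    async def next(self):
        async with self._lock:
            item = self._items[self._index]
            self._index = (self._index + 1) % len(self._items)
            return item

# locations = RoundRobinList(["us-central1", "us-east1"])
# await locations.next()  # "us-central1", then "us-east1", then wraps around
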
beswarm/tools/taskmanager.py CHANGED
@@ -34,6 +34,7 @@ class TaskManager:
         self.task_cache_file.touch(exist_ok=True)
         self.read_tasks_cache()
         self.set_task_cache("root_path", str(self.root_path))
+        self.resume_all_running_task()
 
     def set_task_cache(self, *keys_and_value):
         """
@@ -86,6 +87,12 @@ class TaskManager:
         self.set_task_cache(task_id, "status", TaskStatus.RUNNING.value)
         return task_ids
 
+    def resume_all_running_task(self):
+        running_task_id_list = [task_id for task_id, task in self.tasks_cache.items() if task_id != "root_path" and task.get("status") == "RUNNING"]
+        for task_id in running_task_id_list:
+            tasks_params = self.tasks_cache[task_id]["args"]
+            task_id = task_manager.resume_task(task_id, worker_fun, tasks_params)
+
     def resume_task(self, task_id, task_coro, args):
         """
         Resume a task.
@@ -95,7 +102,7 @@ class TaskManager:
             return TaskStatus.NOT_FOUND
 
         coro = task_coro(**args)
-        task_id = self.create_task(coro)
+        task_id = self.create_task(coro, task_id)
         self.set_task_cache(task_id, "args", args)
         self.set_task_cache(task_id, "status", TaskStatus.RUNNING.value)
         print(f"Task resumed: ID={task_id}, Name={task_id}")
@@ -103,7 +110,7 @@ class TaskManager:
         print(f"self.tasks_cache: {json.dumps(self.tasks_cache, ensure_ascii=False, indent=4)}")
         return task_id
 
-    def create_task(self, coro):
+    def create_task(self, coro, task_id=None):
         """
         Create and register a new task.
 
@@ -114,7 +121,8 @@ class TaskManager:
         Returns:
             str: The unique ID of the task.
         """
-        task_id = str(uuid.uuid4())
+        if task_id == None:
+            task_id = str(uuid.uuid4())
         task_name = f"Task-{task_id[:8]}"
 
         # Create the task with asyncio.create_task()
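
Together, the three taskmanager changes make restarts idempotent: on startup, every task persisted as RUNNING is re-created under its original ID, so cache keys stay stable across process restarts. A compressed sketch of the pattern (standalone; the real TaskManager also persists the cache to disk and resumes with worker_fun):

import asyncio
import uuid

class MiniTaskManager:
    def __init__(self):
        self.tasks_cache = {}  # task_id -> {"status": ..., "args": ...}
        self.tasks = {}

    def create_task(self, coro, task_id=None):
        # Reusing the caller's task_id instead of always minting a new one
        # is what keeps a resumed task under its original cache key.
        if task_id is None:
            task_id = str(uuid.uuid4())
        # Must run inside an event loop, as in the real TaskManager.
        self.tasks[task_id] = asyncio.create_task(coro)
        return task_id

    def resume_task(self, task_id, task_coro, args):
        task_id = self.create_task(task_coro(**args), task_id)
        self.tasks_cache.setdefault(task_id, {})["status"] = "RUNNING"
        return task_id

    def resume_all_running(self, worker):
        for task_id, task in list(self.tasks_cache.items()):
            if task_id != "root_path" and task.get("status") == "RUNNING":
                self.resume_task(task_id, worker, task.get("args", {}))
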
@@ -233,7 +241,7 @@ def create_task(goal, tools, work_dir):
     Args:
         goal (str): Description of the specific task goal to accomplish. The subtask will work toward this goal. It must be clear and specific, and must include background information, completion criteria, etc. Spell out when the task counts as done, and provide the task's background; this background can be files that need to be read or anything else that helps complete the task.
         tools (list[str]): A list of available tool-function objects. While executing, the subtask may call these tools to interact with the environment (e.g. reading and writing files, executing commands).
-        work_dir (str): Absolute path of the working directory. The subtask operates in this directory context. The subtask's working directory is a subdirectory of the main task's working directory.
+        work_dir (str): Absolute path of the working directory. The subtask operates in this directory context. The subtask's working directory is a subdirectory of the main task's working directory. Setting the subtask's working directory to the main task directory itself is **forbidden**.
 
     Returns:
         str: Returns the string "任务已完成" (task completed) when the task finishes successfully.
@@ -276,6 +284,9 @@ async def get_task_result():
     Returns:
         str: The subtask's execution result.
     """
+    running_tasks_num = len([task_id for task_id, task in task_manager.tasks_cache.items() if task_id != "root_path" and task.get("status") == "RUNNING"])
+    if running_tasks_num == 0:
+        return "All tasks are finished."
     task_id, status, result = await task_manager.get_next_result()
 
     unfinished_tasks = [task_id for task_id, task in task_manager.tasks_cache.items() if task_id != "root_path" and task.get("status") != "DONE"]
beswarm-0.2.33.dist-info/METADATA → beswarm-0.2.35.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: beswarm
-Version: 0.2.33
+Version: 0.2.35
 Summary: MAS
 Requires-Python: >=3.11
 Description-Content-Type: text/markdown
beswarm-0.2.33.dist-info/RECORD → beswarm-0.2.35.dist-info/RECORD
@@ -2,14 +2,14 @@ beswarm/__init__.py,sha256=HZjUOJtZR5QhMuDbq-wukQQn1VrBusNWai_ysGo-VVI,20
 beswarm/prompt.py,sha256=5JMfOuXWHscsaeDzwBn223mj9N85eAQdOHXQZk7zeWE,32238
 beswarm/utils.py,sha256=xxbNifOPlfcVkKmF_qFzuEnZgF3MQg3mnOfz1EF0Qss,6697
 beswarm/aient/main.py,sha256=SiYAIgQlLJqYusnTVEJOx1WNkSJKMImhgn5aWjfroxg,3814
-beswarm/aient/setup.py,sha256=MglF5g_LQLmX62ZgR1FmQUZanljhQx2EwLPPJjT4zac,487
+beswarm/aient/setup.py,sha256=LqjY1x8CQrcvFrHKFSyZpm5h6iBuosHIpZqXdCPnPes,487
 beswarm/aient/src/aient/__init__.py,sha256=SRfF7oDVlOOAi6nGKiJIUK6B_arqYLO9iSMp-2IZZps,21
 beswarm/aient/src/aient/core/__init__.py,sha256=NxjebTlku35S4Dzr16rdSqSTWUvvwEeACe8KvHJnjPg,34
 beswarm/aient/src/aient/core/log_config.py,sha256=kz2_yJv1p-o3lUQOwA3qh-LSc3wMHv13iCQclw44W9c,274
 beswarm/aient/src/aient/core/models.py,sha256=d4MISNezTSe0ls0-fjuToI2SoT-sk5fWqAJuKVinIlo,7502
-beswarm/aient/src/aient/core/request.py,sha256=7KpHHDyu0t2g4IQGqlV_UxK0zAAjN_-CZpyd7S5pxS0,74263
-beswarm/aient/src/aient/core/response.py,sha256=-28HYKuzgfC1y7VOrYLk75_QH5yh6c1IS024yoQM0mg,35671
-beswarm/aient/src/aient/core/utils.py,sha256=NcXdb8zBN0GE01OGaUzg8U34RaraoFf2MaLDDGFvvC4,27492
+beswarm/aient/src/aient/core/request.py,sha256=1tedDQf8GRv5Y7rYNE_596vQb4o7e1icaKAA7lIl4YY,76114
+beswarm/aient/src/aient/core/response.py,sha256=Ba0BwsIN2ozZC_UInkGS07qKlpo3dIei6rw0INQ66BE,33086
+beswarm/aient/src/aient/core/utils.py,sha256=8TR442o3VV7Kl9l6f6LlmOUQ1UDZ-aXMzQqm-qIrqE4,28166
 beswarm/aient/src/aient/core/test/test_base_api.py,sha256=pWnycRJbuPSXKKU9AQjWrMAX1wiLC_014Qc9hh5C2Pw,524
 beswarm/aient/src/aient/core/test/test_geminimask.py,sha256=HFX8jDbNg_FjjgPNxfYaR-0-roUrOO-ND-FVsuxSoiw,13254
 beswarm/aient/src/aient/core/test/test_image.py,sha256=_T4peNGdXKBHHxyQNx12u-NTyFE8TlYI6NvvagsG2LE,319
@@ -135,9 +135,9 @@ beswarm/tools/request_input.py,sha256=gXNAJPOJektMqxJVyzNTFOeMQ7xUkO-wWMYH-r2Rdw
 beswarm/tools/screenshot.py,sha256=u6t8FCgW5YHJ_Oc4coo8e0F3wTusWE_-H8dFh1rBq9Q,1011
 beswarm/tools/search_arxiv.py,sha256=caVIUOzMhFu-r_gVgJZrH2EO9xI5iV_qLAg0b3Ie9Xg,8095
 beswarm/tools/search_web.py,sha256=ybbdbJq80plooXLMiyjAMOSCEyZJ0hquGUpabBhfFx0,16195
-beswarm/tools/taskmanager.py,sha256=rVLB0xwteT-5y9svmJ68K65U3JuX9BJGvqNzyxPvjaI,12178
+beswarm/tools/taskmanager.py,sha256=n7G6cH96Tcz57MfiOffISMMAfUtr49_uikkeoCDCeRg,12940
 beswarm/tools/worker.py,sha256=s6tN4JhA07qzTlP7xWiB0MjnBIJ6XSrtlJTA_RqG1_A,23539
-beswarm-0.2.33.dist-info/METADATA,sha256=76JECSt37iGRe_rmvWVqtcXfVcdL_iTfwtktCfnHKyY,3878
-beswarm-0.2.33.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-beswarm-0.2.33.dist-info/top_level.txt,sha256=pJw4O87wvt5882smuSO6DfByJz7FJ8SxxT8h9fHCmpo,8
-beswarm-0.2.33.dist-info/RECORD,,
+beswarm-0.2.35.dist-info/METADATA,sha256=cfwB-Cq_qEDmpCNZyzrFNjDtnikxw8IqmrKf0MZd_Yk,3878
+beswarm-0.2.35.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+beswarm-0.2.35.dist-info/top_level.txt,sha256=pJw4O87wvt5882smuSO6DfByJz7FJ8SxxT8h9fHCmpo,8
+beswarm-0.2.35.dist-info/RECORD,,