aient-1.1.50-py3-none-any.whl → aient-1.1.52-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two package versions as they appear in their respective public registries.
aient/core/request.py CHANGED
@@ -3,6 +3,8 @@ import json
 import httpx
 import base64
 import urllib.parse
+from io import IOBase
+from typing import Tuple
 
 from .models import RequestModel, Message
 from .utils import (
@@ -239,30 +241,48 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
         ]
 
     if "gemini-2.5" in original_model:
-        payload["generationConfig"]["thinkingConfig"] = {
-            "includeThoughts": True,
-        }
         # Detect the thinking budget setting from the requested model name
         m = re.match(r".*-think-(-?\d+)", request.model)
         if m:
             try:
                 val = int(m.group(1))
-                if val > 32768 and "gemini-2.5-pro" in original_model:
-                    val = 32768
-                elif val < 128 and "gemini-2.5-pro" in original_model:
-                    val = 128
-                elif val <= 0:
-                    val = 0
-                elif val > 24576:
-                    val = 24576
-                payload["generationConfig"]["thinkingConfig"]["thinkingBudget"] = val
+                budget = None
+                # gemini-2.5-pro: [128, 32768]
+                if "gemini-2.5-pro" in original_model:
+                    if val < 128:
+                        budget = 128
+                    elif val > 32768:
+                        budget = 32768
+                    else: # 128 <= val <= 32768
+                        budget = val
+
+                # gemini-2.5-flash-lite: [0] or [512, 24576]
+                elif "gemini-2.5-flash-lite" in original_model:
+                    if val > 0 and val < 512:
+                        budget = 512
+                    elif val > 24576:
+                        budget = 24576
+                    else: # Includes 0 and valid range, and clamps invalid negatives
+                        budget = val if val >= 0 else 0
+
+                # gemini-2.5-flash (and other gemini-2.5 models as a fallback): [0, 24576]
+                else:
+                    if val > 24576:
+                        budget = 24576
+                    else: # Includes 0 and valid range, and clamps invalid negatives
+                        budget = val if val >= 0 else 0
+
+                payload["generationConfig"]["thinkingConfig"] = {
+                    "includeThoughts": True if budget else False,
+                    "thinkingBudget": budget
+                }
             except ValueError:
                 # If the value cannot be converted to an integer, ignore the thinking budget setting
                 pass
-
-        # # Detect the search tag
-        # if request.model.endswith("-search"):
-        # payload["tools"] = [{"googleSearch": {}}]
+        else:
+            payload["generationConfig"]["thinkingConfig"] = {
+                "includeThoughts": True,
+            }
 
     if safe_get(provider, "preferences", "post_body_parameter_overrides", default=None):
         for key, value in safe_get(provider, "preferences", "post_body_parameter_overrides", default={}).items():
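The clamping rules in the hunk above are easier to see as a standalone function. The sketch below restates them under the same ranges ([128, 32768] for gemini-2.5-pro, 0 or [512, 24576] for gemini-2.5-flash-lite, [0, 24576] for other gemini-2.5 models); the function name and the example model strings are illustrative and not part of aient's API.

import re
from typing import Optional

def clamp_thinking_budget(requested_model: str, original_model: str) -> Optional[int]:
    """Illustrative restatement of the hunk above: parse a trailing
    `-think-N` suffix and clamp N to the budget range of the model family."""
    m = re.match(r".*-think-(-?\d+)", requested_model)
    if m is None:
        return None  # no suffix: the payload only sets includeThoughts=True
    val = int(m.group(1))
    if "gemini-2.5-pro" in original_model:           # [128, 32768]
        return min(max(val, 128), 32768)
    if "gemini-2.5-flash-lite" in original_model:    # 0 or [512, 24576]
        if 0 < val < 512:
            return 512
        return min(max(val, 0), 24576)
    return min(max(val, 0), 24576)                   # other gemini-2.5: [0, 24576]

# clamp_thinking_budget("gemini-2.5-pro-think-999999", "gemini-2.5-pro")            -> 32768
# clamp_thinking_budget("gemini-2.5-flash-lite-think-100", "gemini-2.5-flash-lite") -> 512
# clamp_thinking_budget("gemini-2.5-flash-think--5", "gemini-2.5-flash")            -> 0

Note that the payload built in the hunk sets includeThoughts to False whenever the clamped budget is 0, so a -think-0 suffix effectively disables thinking output.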
@@ -348,16 +368,12 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
     # search_tool = None
 
     # https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini/2-0-flash?hl=zh-cn
-    pro_models = ["gemini-2.5", "gemini-2.0"]
+    pro_models = ["gemini-2.5"]
     if any(pro_model in original_model for pro_model in pro_models):
-        location = gemini2
+        location = gemini2_5_pro_exp
     else:
         location = gemini1
 
-    if "gemini-2.5-flash-lite-preview-06-17" == original_model or \
-        "gemini-2.5-pro-preview-06-05" == original_model:
-        location = gemini2_5_pro_exp
-
     if "google-vertex-ai" in provider.get("base_url", ""):
         url = provider.get("base_url").rstrip('/') + "/v1/projects/{PROJECT_ID}/locations/{LOCATION}/publishers/google/models/{MODEL_ID}:{stream}".format(
             LOCATION=await location.next(),
@@ -366,24 +382,8 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
             stream=gemini_stream
         )
     elif api_key is not None and api_key[2] == ".":
-        if provider.get("project_id") and "gemini-2.5-pro-preview-06-05" == original_model:
-            if isinstance(provider.get("project_id"), list):
-                api_key_index = provider.get("api").index(api_key)
-                project_id = provider.get("project_id")[api_key_index]
-            else:
-                project_id = provider.get("project_id")
-            url = f"https://aiplatform.googleapis.com/v1/projects/{project_id}/locations/global/publishers/google/models/{original_model}:{gemini_stream}?key={api_key}"
-        else:
-            url = f"https://aiplatform.googleapis.com/v1/publishers/google/models/{original_model}:{gemini_stream}?key={api_key}"
+        url = f"https://aiplatform.googleapis.com/v1/publishers/google/models/{original_model}:{gemini_stream}?key={api_key}"
         headers.pop("Authorization", None)
-    elif "gemini-2.5-flash-lite-preview-06-17" == original_model or \
-        "gemini-2.5-pro-preview-06-05" == original_model:
-        url = "https://aiplatform.googleapis.com/v1/projects/{PROJECT_ID}/locations/{LOCATION}/publishers/google/models/{MODEL_ID}:{stream}".format(
-            LOCATION=await location.next(),
-            PROJECT_ID=project_id,
-            MODEL_ID=original_model,
-            stream=gemini_stream
-        )
     else:
         url = "https://{LOCATION}-aiplatform.googleapis.com/v1/projects/{PROJECT_ID}/locations/{LOCATION}/publishers/google/models/{MODEL_ID}:{stream}".format(
             LOCATION=await location.next(),
@@ -535,29 +535,48 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
         payload["generationConfig"]["max_output_tokens"] = 8192
 
     if "gemini-2.5" in original_model:
-        payload["generationConfig"]["thinkingConfig"] = {
-            "includeThoughts": True,
-        }
         # Detect the thinking budget setting from the requested model name
         m = re.match(r".*-think-(-?\d+)", request.model)
         if m:
             try:
                 val = int(m.group(1))
-                if val > 32768 and "gemini-2.5-pro" in original_model:
-                    val = 32768
-                elif val < 128 and "gemini-2.5-pro" in original_model:
-                    val = 128
-                elif val <= 0:
-                    val = 0
-                elif val > 24576:
-                    val = 24576
-                payload["generationConfig"]["thinkingConfig"]["thinkingBudget"] = val
+                budget = None
+                # gemini-2.5-pro: [128, 32768]
+                if "gemini-2.5-pro" in original_model:
+                    if val < 128:
+                        budget = 128
+                    elif val > 32768:
+                        budget = 32768
+                    else: # 128 <= val <= 32768
+                        budget = val
+
+                # gemini-2.5-flash-lite: [0] or [512, 24576]
+                elif "gemini-2.5-flash-lite" in original_model:
+                    if val > 0 and val < 512:
+                        budget = 512
+                    elif val > 24576:
+                        budget = 24576
+                    else: # Includes 0 and valid range, and clamps invalid negatives
+                        budget = val if val >= 0 else 0
+
+                # gemini-2.5-flash (and other gemini-2.5 models as a fallback): [0, 24576]
+                else:
+                    if val > 24576:
+                        budget = 24576
+                    else: # Includes 0 and valid range, and clamps invalid negatives
+                        budget = val if val >= 0 else 0
+
+                payload["generationConfig"]["thinkingConfig"] = {
+                    "includeThoughts": True if budget else False,
+                    "thinkingBudget": budget
+                }
             except ValueError:
                 # If the value cannot be converted to an integer, ignore the thinking budget setting
                 pass
-
-        # if request.model.endswith("-search"):
-        # payload["tools"] = [search_tool]
+        else:
+            payload["generationConfig"]["thinkingConfig"] = {
+                "includeThoughts": True,
+            }
 
     if safe_get(provider, "preferences", "post_body_parameter_overrides", default=None):
         for key, value in safe_get(provider, "preferences", "post_body_parameter_overrides", default={}).items():
@@ -1776,21 +1795,98 @@ async def get_dalle_payload(request, engine, provider, api_key=None):
 
     return url, headers, payload
 
+async def get_upload_certificate(client: httpx.AsyncClient, api_key: str, model: str) -> dict:
+    """Step 1: obtain the file upload certificate"""
+    # print("Step 1: fetching the upload certificate...")
+    headers = {"Authorization": f"Bearer {api_key}"}
+    params = {"action": "getPolicy", "model": model}
+    try:
+        response = await client.get("https://dashscope.aliyuncs.com/api/v1/uploads", headers=headers, params=params)
+        response.raise_for_status() # Raise an exception if the request failed
+        cert_data = response.json()
+        # print("Certificate obtained successfully.")
+        return cert_data.get("data")
+    except httpx.HTTPStatusError as e:
+        print(f"Failed to obtain the upload certificate: HTTP {e.response.status_code}")
+        print(f"Response body: {e.response.text}")
+        return None
+    except Exception as e:
+        print(f"Unknown error while obtaining the upload certificate: {e}")
+        return None
+
+from mimetypes import guess_type
+
+async def upload_file_to_oss(client: httpx.AsyncClient, certificate: dict, file: Tuple[str, IOBase, str]) -> str:
+    """Step 2: upload the file content to OSS using the certificate"""
+    upload_host = certificate.get("upload_host")
+    upload_dir = certificate.get("upload_dir")
+    object_key = f"{upload_dir}/{file[0]}"
+
+    form_data = {
+        "key": object_key,
+        "policy": certificate.get("policy"),
+        "OSSAccessKeyId": certificate.get("oss_access_key_id"),
+        "signature": certificate.get("signature"),
+        "success_action_status": "200",
+        "x-oss-object-acl": certificate.get("x_oss_object_acl"),
+        "x-oss-forbid-overwrite": certificate.get("x_oss_forbid_overwrite"),
+    }
+
+    files = {"file": file}
+
+    try:
+        response = await client.post(upload_host, data=form_data, files=files, timeout=3600)
+        response.raise_for_status()
+        # print("File uploaded successfully!")
+        oss_url = f"oss://{object_key}"
+        # print(f"File OSS URL: {oss_url}")
+        return oss_url
+    except httpx.HTTPStatusError as e:
+        print(f"Failed to upload the file: HTTP {e.response.status_code}")
+        print(f"Response body: {e.response.text}")
+        return None
+    except Exception as e:
+        print(f"Unknown error while uploading the file: {e}")
+        return None
+
 async def get_whisper_payload(request, engine, provider, api_key=None):
     model_dict = get_model_dict(provider)
     original_model = model_dict[request.model]
-    headers = {
-        # "Content-Type": "multipart/form-data",
-    }
+    headers = {}
     if api_key:
         headers['Authorization'] = f"Bearer {api_key}"
     url = provider['base_url']
     url = BaseAPI(url).audio_transcriptions
 
-    payload = {
-        "model": original_model,
-        "file": request.file,
-    }
+    if "dashscope.aliyuncs.com" in url:
+        client = httpx.AsyncClient()
+        certificate = await get_upload_certificate(client, api_key, original_model)
+        if not certificate:
+            return
+
+        # Step 2: upload the file
+        oss_url = await upload_file_to_oss(client, certificate, request.file)
+        headers = {
+            "Authorization": f"Bearer {api_key}",
+            "Content-Type": "application/json",
+            "X-DashScope-OssResourceResolve": "enable"
+        }
+        payload = {
+            "model": original_model,
+            "input": {
+                "messages": [
                    {
+                        "role": "user",
+                        "content": [{"audio": oss_url}]
+                    }
+                ]
+            }
+        }
+    else:
+        payload = {
+            "model": original_model,
+            "file": request.file,
+        }
 
     if request.prompt:
         payload["prompt"] = request.prompt
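In the DashScope branch above, request.file is passed through unchanged as an httpx-style upload tuple, matching the Tuple[str, IOBase, str] annotation on upload_file_to_oss: filename, file object, content type. A minimal illustration of building such a tuple (the filename, bytes, and content type below are made up):

from io import BytesIO

# (filename, file object, content type): the 3-tuple form accepted by httpx's
# `files=` argument and expected by upload_file_to_oss above.
audio_bytes = BytesIO(b"\x00\x01fake-audio-bytes")
file_tuple = ("recording.wav", audio_bytes, "audio/wav")

# files={"file": file_tuple} becomes a single multipart/form-data part named "file",
# and file_tuple[0] ("recording.wav") ends up in the oss://{upload_dir}/recording.wav object key.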
@@ -1860,11 +1956,20 @@ async def get_tts_payload(request, engine, provider, api_key=None):
     url = provider['base_url']
     url = BaseAPI(url).audio_speech
 
-    payload = {
-        "model": original_model,
-        "input": request.input,
-        "voice": request.voice,
-    }
+    if "api.minimaxi.com" in url:
+        payload = {
+            "model": original_model,
+            "text": request.input,
+            "voice_setting": {
+                "voice_id": request.voice
+            }
+        }
+    else:
+        payload = {
+            "model": original_model,
+            "input": request.input,
+            "voice": request.voice,
+        }
 
     if request.response_format:
         payload["response_format"] = request.response_format
aient/core/response.py CHANGED
@@ -666,6 +666,10 @@ async def fetch_response(client, url, headers, payload, engine, model):
 
         yield response_json
 
+    elif "dashscope.aliyuncs.com" in url and "multimodal-generation" in url:
+        response_json = response.json()
+        content = safe_get(response_json, "output", "choices", 0, "message", "content", 0, default=None)
+        yield content
     else:
         response_json = response.json()
         yield response_json
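The new branch digs the transcription content out of a deeply nested DashScope multimodal response. aient's own safe_get helper (imported from .utils) is not shown in this diff; the minimal stand-in below, together with an assumed response shape derived from the lookup path used above, illustrates what that call returns.

def safe_get(data, *keys, default=None):
    """Illustrative stand-in: walk dict keys and list indexes, returning `default` on any miss."""
    for key in keys:
        try:
            data = data[key]
        except (KeyError, IndexError, TypeError):
            return default
    return data

# Assumed response shape, based on the lookup path in the hunk above.
response_json = {
    "output": {
        "choices": [
            {"message": {"role": "assistant", "content": [{"text": "transcribed speech"}]}}
        ]
    }
}

content = safe_get(response_json, "output", "choices", 0, "message", "content", 0, default=None)
print(content)  # {'text': 'transcribed speech'}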
aient/core/utils.py CHANGED
@@ -49,10 +49,16 @@ class BaseAPI:
         self.v1_models: str = urlunparse(parsed_url[:2] + (before_v1 + "models",) + ("",) * 3)
         self.chat_url: str = urlunparse(parsed_url[:2] + (before_v1 + "chat/completions",) + ("",) * 3)
         self.image_url: str = urlunparse(parsed_url[:2] + (before_v1 + "images/generations",) + ("",) * 3)
-        self.audio_transcriptions: str = urlunparse(parsed_url[:2] + (before_v1 + "audio/transcriptions",) + ("",) * 3)
+        if parsed_url.hostname == "dashscope.aliyuncs.com":
+            self.audio_transcriptions: str = urlunparse(parsed_url[:2] + ("/api/v1/services/aigc/multimodal-generation/generation",) + ("",) * 3)
+        else:
+            self.audio_transcriptions: str = urlunparse(parsed_url[:2] + (before_v1 + "audio/transcriptions",) + ("",) * 3)
         self.moderations: str = urlunparse(parsed_url[:2] + (before_v1 + "moderations",) + ("",) * 3)
         self.embeddings: str = urlunparse(parsed_url[:2] + (before_v1 + "embeddings",) + ("",) * 3)
-        self.audio_speech: str = urlunparse(parsed_url[:2] + (before_v1 + "audio/speech",) + ("",) * 3)
+        if parsed_url.hostname == "api.minimaxi.com":
+            self.audio_speech: str = urlunparse(parsed_url[:2] + ("v1/t2a_v2",) + ("",) * 3)
+        else:
+            self.audio_speech: str = urlunparse(parsed_url[:2] + (before_v1 + "audio/speech",) + ("",) * 3)
 
         if parsed_url.hostname == "generativelanguage.googleapis.com":
             self.base_url = api_url
@@ -440,8 +446,23 @@ c4 = ThreadSafeCircularList(["us-east5", "us-central1", "europe-west4", "asia-so
 c3h = ThreadSafeCircularList(["us-east5", "us-central1", "europe-west1", "europe-west4"])
 gemini1 = ThreadSafeCircularList(["us-central1", "us-east4", "us-west1", "us-west4", "europe-west1", "europe-west2"])
 gemini2 = ThreadSafeCircularList(["us-central1"])
-gemini2_5_pro_exp = ThreadSafeCircularList(["global"])
-
+# gemini2_5_pro_exp = ThreadSafeCircularList(["global"])
+gemini2_5_pro_exp = ThreadSafeCircularList([
+    "us-central1",
+    "us-east1",
+    "us-east4",
+    "us-east5",
+    "us-south1",
+    "us-west1",
+    "us-west4",
+    "europe-central2",
+    "europe-north1",
+    "europe-southwest1",
+    "europe-west1",
+    "europe-west4",
+    "europe-west8",
+    "europe-west9"
+])
 
 
 # end_of_line = "\n\r\n"
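The widened region list only matters because locations are handed out round-robin: each `await location.next()` call in request.py takes the next region in the list, spreading Vertex AI requests across regions. aient's ThreadSafeCircularList itself is not part of this diff; the sketch below is an illustrative stand-in that reproduces only the async round-robin behaviour relied on above.

import asyncio
from typing import List

class RoundRobinLocations:
    """Illustrative stand-in for a circular list with an async next() method."""
    def __init__(self, items: List[str]):
        self._items = items
        self._index = 0
        self._lock = asyncio.Lock()  # serialize concurrent callers

    async def next(self) -> str:
        async with self._lock:
            item = self._items[self._index]
            self._index = (self._index + 1) % len(self._items)
            return item

async def demo():
    locations = RoundRobinLocations(["us-central1", "us-east1", "europe-west1"])
    for _ in range(4):
        print(await locations.next())  # us-central1, us-east1, europe-west1, us-central1

asyncio.run(demo())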
aient/plugins/websearch.py CHANGED
@@ -44,6 +44,8 @@ def url_to_markdown(url):
         body = body[0]
         body = Cleaner(javascript=True, style=True).clean_html(body)
         return ''.join(lxml.html.tostring(c, encoding='unicode') for c in body)
+    except ImportError as e:
+        raise e
     except Exception as e:
         # print('\033[31m')
         # print("error: url_to_markdown url", url)
aient-1.1.52.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: aient
-Version: 1.1.50
+Version: 1.1.52
 Summary: Aient: The Awakening of Agent.
 Description-Content-Type: text/markdown
 License-File: LICENSE
aient-1.1.52.dist-info/RECORD CHANGED
@@ -4,9 +4,9 @@ aient/core/.gitignore,sha256=5JRRlYYsqt_yt6iFvvzhbqh2FTUQMqwo6WwIuFzlGR8,13
 aient/core/__init__.py,sha256=NxjebTlku35S4Dzr16rdSqSTWUvvwEeACe8KvHJnjPg,34
 aient/core/log_config.py,sha256=kz2_yJv1p-o3lUQOwA3qh-LSc3wMHv13iCQclw44W9c,274
 aient/core/models.py,sha256=d4MISNezTSe0ls0-fjuToI2SoT-sk5fWqAJuKVinIlo,7502
-aient/core/request.py,sha256=8HlSFaBhWMs5thhL4C5qj-hvuDZWUXCYWwVShFR99QU,72263
-aient/core/response.py,sha256=-28HYKuzgfC1y7VOrYLk75_QH5yh6c1IS024yoQM0mg,35671
-aient/core/utils.py,sha256=NcXdb8zBN0GE01OGaUzg8U34RaraoFf2MaLDDGFvvC4,27492
+aient/core/request.py,sha256=LMt80dfn-XTQ-6_yRkEIT5QMN-hAPX52sAkKQ2hAHbM,76065
+aient/core/response.py,sha256=FKcUH4f0oD0mvOB_ZWqT05ZefZfawwZS-it5djR4ETU,35915
+aient/core/utils.py,sha256=8TR442o3VV7Kl9l6f6LlmOUQ1UDZ-aXMzQqm-qIrqE4,28166
 aient/core/test/test_base_api.py,sha256=pWnycRJbuPSXKKU9AQjWrMAX1wiLC_014Qc9hh5C2Pw,524
 aient/core/test/test_geminimask.py,sha256=HFX8jDbNg_FjjgPNxfYaR-0-roUrOO-ND-FVsuxSoiw,13254
 aient/core/test/test_image.py,sha256=_T4peNGdXKBHHxyQNx12u-NTyFE8TlYI6NvvagsG2LE,319
@@ -32,13 +32,13 @@ aient/plugins/read_image.py,sha256=4FbIiMNVFUQpNyiH5ApGSRvOD9ujcXGyuqlGTJMd7ac,4
 aient/plugins/readonly.py,sha256=qK5-kBM3NDH1b-otFxFHpAjV5BXEY_e7cTWBcpP7G5k,710
 aient/plugins/registry.py,sha256=YknzhieU_8nQ3oKlUSSWDB4X7t2Jx0JnqT2Jd9Xsvfk,3574
 aient/plugins/run_python.py,sha256=MohvdtZUTDLrHBDtJ9L2_Qu1pWAGrkbzsGmmn5tMN20,4614
-aient/plugins/websearch.py,sha256=LPS5NmHrY-Rc0FCPlhHrUWE90XJmXF_AvShLHTV_Zqc,15285
+aient/plugins/websearch.py,sha256=9yImBa1s5V7Djqzx6L4naDyIsGIcf_js1LOyLX0aNHw,15338
 aient/plugins/write_file.py,sha256=hExFLuoNPtjYxJI3pVbofZRpokvUabpXdEkd3mZJPPc,3778
 aient/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 aient/utils/prompt.py,sha256=UcSzKkFE4-h_1b6NofI6xgk3GoleqALRKY8VBaXLjmI,11311
 aient/utils/scripts.py,sha256=h7EA2xBydUF_wdZLsPgjCq4Egdycx1gf2qrdrm0I7y0,40909
-aient-1.1.50.dist-info/licenses/LICENSE,sha256=XNdbcWldt0yaNXXWB_Bakoqnxb3OVhUft4MgMA_71ds,1051
-aient-1.1.50.dist-info/METADATA,sha256=4tCnfHz_FSAHEe1g8SkkXZbDK2VGHTSmJL3ea1H-VzM,4968
-aient-1.1.50.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-aient-1.1.50.dist-info/top_level.txt,sha256=3oXzrP5sAVvyyqabpeq8A2_vfMtY554r4bVE-OHBrZk,6
-aient-1.1.50.dist-info/RECORD,,
+aient-1.1.52.dist-info/licenses/LICENSE,sha256=XNdbcWldt0yaNXXWB_Bakoqnxb3OVhUft4MgMA_71ds,1051
+aient-1.1.52.dist-info/METADATA,sha256=2NHf_SCLBcs1AfTuk_WP92BxNpUmSErYHRC1nly_9pE,4968
+aient-1.1.52.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+aient-1.1.52.dist-info/top_level.txt,sha256=3oXzrP5sAVvyyqabpeq8A2_vfMtY554r4bVE-OHBrZk,6
+aient-1.1.52.dist-info/RECORD,,