aient-1.2.49.tar.gz → aient-1.2.50.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. {aient-1.2.49 → aient-1.2.50}/PKG-INFO +1 -1
  2. {aient-1.2.49 → aient-1.2.50}/aient/core/request.py +27 -27
  3. {aient-1.2.49 → aient-1.2.50}/aient/core/utils.py +58 -19
  4. {aient-1.2.49 → aient-1.2.50}/aient.egg-info/PKG-INFO +1 -1
  5. {aient-1.2.49 → aient-1.2.50}/pyproject.toml +1 -1
  6. {aient-1.2.49 → aient-1.2.50}/LICENSE +0 -0
  7. {aient-1.2.49 → aient-1.2.50}/README.md +0 -0
  8. {aient-1.2.49 → aient-1.2.50}/aient/__init__.py +0 -0
  9. {aient-1.2.49 → aient-1.2.50}/aient/architext/architext/__init__.py +0 -0
  10. {aient-1.2.49 → aient-1.2.50}/aient/architext/architext/core.py +0 -0
  11. {aient-1.2.49 → aient-1.2.50}/aient/architext/test/openai_client.py +0 -0
  12. {aient-1.2.49 → aient-1.2.50}/aient/architext/test/test.py +0 -0
  13. {aient-1.2.49 → aient-1.2.50}/aient/architext/test/test_save_load.py +0 -0
  14. {aient-1.2.49 → aient-1.2.50}/aient/core/__init__.py +0 -0
  15. {aient-1.2.49 → aient-1.2.50}/aient/core/log_config.py +0 -0
  16. {aient-1.2.49 → aient-1.2.50}/aient/core/models.py +0 -0
  17. {aient-1.2.49 → aient-1.2.50}/aient/core/response.py +0 -0
  18. {aient-1.2.49 → aient-1.2.50}/aient/core/test/test_base_api.py +0 -0
  19. {aient-1.2.49 → aient-1.2.50}/aient/core/test/test_geminimask.py +0 -0
  20. {aient-1.2.49 → aient-1.2.50}/aient/core/test/test_image.py +0 -0
  21. {aient-1.2.49 → aient-1.2.50}/aient/core/test/test_payload.py +0 -0
  22. {aient-1.2.49 → aient-1.2.50}/aient/models/__init__.py +0 -0
  23. {aient-1.2.49 → aient-1.2.50}/aient/models/audio.py +0 -0
  24. {aient-1.2.49 → aient-1.2.50}/aient/models/base.py +0 -0
  25. {aient-1.2.49 → aient-1.2.50}/aient/models/chatgpt.py +0 -0
  26. {aient-1.2.49 → aient-1.2.50}/aient/plugins/__init__.py +0 -0
  27. {aient-1.2.49 → aient-1.2.50}/aient/plugins/arXiv.py +0 -0
  28. {aient-1.2.49 → aient-1.2.50}/aient/plugins/config.py +0 -0
  29. {aient-1.2.49 → aient-1.2.50}/aient/plugins/excute_command.py +0 -0
  30. {aient-1.2.49 → aient-1.2.50}/aient/plugins/get_time.py +0 -0
  31. {aient-1.2.49 → aient-1.2.50}/aient/plugins/image.py +0 -0
  32. {aient-1.2.49 → aient-1.2.50}/aient/plugins/list_directory.py +0 -0
  33. {aient-1.2.49 → aient-1.2.50}/aient/plugins/read_image.py +0 -0
  34. {aient-1.2.49 → aient-1.2.50}/aient/plugins/readonly.py +0 -0
  35. {aient-1.2.49 → aient-1.2.50}/aient/plugins/registry.py +0 -0
  36. {aient-1.2.49 → aient-1.2.50}/aient/plugins/run_python.py +0 -0
  37. {aient-1.2.49 → aient-1.2.50}/aient/plugins/websearch.py +0 -0
  38. {aient-1.2.49 → aient-1.2.50}/aient/utils/__init__.py +0 -0
  39. {aient-1.2.49 → aient-1.2.50}/aient/utils/prompt.py +0 -0
  40. {aient-1.2.49 → aient-1.2.50}/aient/utils/scripts.py +0 -0
  41. {aient-1.2.49 → aient-1.2.50}/aient.egg-info/SOURCES.txt +0 -0
  42. {aient-1.2.49 → aient-1.2.50}/aient.egg-info/dependency_links.txt +0 -0
  43. {aient-1.2.49 → aient-1.2.50}/aient.egg-info/requires.txt +0 -0
  44. {aient-1.2.49 → aient-1.2.50}/aient.egg-info/top_level.txt +0 -0
  45. {aient-1.2.49 → aient-1.2.50}/setup.cfg +0 -0
  46. {aient-1.2.49 → aient-1.2.50}/test/test_Web_crawler.py +0 -0
  47. {aient-1.2.49 → aient-1.2.50}/test/test_ddg_search.py +0 -0
  48. {aient-1.2.49 → aient-1.2.50}/test/test_google_search.py +0 -0
  49. {aient-1.2.49 → aient-1.2.50}/test/test_ollama.py +0 -0
  50. {aient-1.2.49 → aient-1.2.50}/test/test_plugin.py +0 -0
  51. {aient-1.2.49 → aient-1.2.50}/test/test_url.py +0 -0
  52. {aient-1.2.49 → aient-1.2.50}/test/test_whisper.py +0 -0
{aient-1.2.49 → aient-1.2.50}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: aient
-Version: 1.2.49
+Version: 1.2.50
 Summary: Aient: The Awakening of Agent.
 Requires-Python: >=3.11
 Description-Content-Type: text/markdown
{aient-1.2.49 → aient-1.2.50}/aient/core/request.py
@@ -1,13 +1,24 @@
 import re
 import json
 import copy
+import hmac
+import time
 import httpx
 import base64
 import asyncio
+import hashlib
+import datetime
+import urllib.parse
 from io import IOBase
 from typing import Tuple
+from datetime import timezone
 from urllib.parse import urlparse
 
+
+from cryptography.hazmat.primitives import hashes
+from cryptography.hazmat.primitives.asymmetric import padding
+from cryptography.hazmat.primitives.serialization import load_pem_private_key
+
 from .models import RequestModel, Message
 from .utils import (
     c3s,
@@ -16,7 +27,7 @@ from .utils import (
     c35s,
     c4,
     gemini1,
-    gemini2,
+    gemini_preview,
     gemini2_5_pro_exp,
     BaseAPI,
     safe_get,
@@ -57,7 +68,7 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
 
     try:
         request_messages = [Message(role="user", content=request.prompt)]
-    except:
+    except Exception:
        request_messages = copy.deepcopy(request.messages)
     for msg in request_messages:
         if msg.role == "assistant":
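Note on the repeated "except:" to "except Exception:" change in this release: a bare except also traps KeyboardInterrupt and SystemExit, which derive from BaseException rather than Exception, so narrowing the clause keeps Ctrl-C and interpreter shutdown working. A minimal standalone illustration (not taken from the package):

# Minimal illustration: KeyboardInterrupt is not an Exception subclass,
# so "except Exception" lets it propagate while a bare "except" would not.
try:
    raise KeyboardInterrupt
except Exception:
    print("never reached")
except BaseException as exc:
    print(f"caught at the BaseException level: {type(exc).__name__}")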
@@ -300,11 +311,6 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
 
     return url, headers, payload
 
-import time
-from cryptography.hazmat.primitives import hashes
-from cryptography.hazmat.primitives.asymmetric import padding
-from cryptography.hazmat.primitives.serialization import load_pem_private_key
-
 def create_jwt(client_email, private_key):
     # JWT Header
     header = json.dumps({
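The hunk above moves the cryptography imports to module level; create_jwt itself builds a Google service-account JWT from client_email and private_key. For readers unfamiliar with the pattern, the sketch below shows a typical RS256-signed service-account JWT using the same primitives. It is an illustrative reconstruction, not aient's exact function: the claim set, scope, and the helper names _b64url and build_service_account_jwt are assumptions.

# Hedged sketch of an RS256 service-account JWT; names and claims are
# illustrative, not copied from aient/core/request.py.
import base64
import json
import time

from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.serialization import load_pem_private_key


def _b64url(data: bytes) -> str:
    # JWT segments are base64url-encoded without padding
    return base64.urlsafe_b64encode(data).rstrip(b"=").decode()


def build_service_account_jwt(client_email: str, private_key_pem: str) -> str:
    header = {"alg": "RS256", "typ": "JWT"}
    now = int(time.time())
    claims = {
        "iss": client_email,
        "scope": "https://www.googleapis.com/auth/cloud-platform",
        "aud": "https://oauth2.googleapis.com/token",
        "iat": now,
        "exp": now + 3600,
    }
    signing_input = ".".join(
        _b64url(json.dumps(part, separators=(",", ":")).encode())
        for part in (header, claims)
    )
    key = load_pem_private_key(private_key_pem.encode(), password=None)
    signature = key.sign(signing_input.encode(), padding.PKCS1v15(), hashes.SHA256())
    return f"{signing_input}.{_b64url(signature)}"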
@@ -375,12 +381,14 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
 
     # https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini/2-0-flash?hl=zh-cn
     pro_models = ["gemini-2.5"]
-    if any(pro_model in original_model for pro_model in pro_models):
+    if "gemini-2.5-flash-image-preview" in original_model:
+        location = gemini_preview
+    elif any(pro_model in original_model for pro_model in pro_models):
         location = gemini2_5_pro_exp
     else:
         location = gemini1
 
-    if "google-vertex-ai" in provider.get("base_url", ""):
+    if "google-vertex-ai" in provider.get("base_url", "") or "gemini-2.5-flash-image-preview" in original_model:
         url = provider.get("base_url").rstrip('/') + "/v1/projects/{PROJECT_ID}/locations/{LOCATION}/publishers/google/models/{MODEL_ID}:{stream}".format(
             LOCATION=await location.next(),
             PROJECT_ID=project_id,
@@ -754,18 +762,12 @@ async def get_vertex_claude_payload(request, engine, provider, api_key=None):
             "type": "any"
         }
 
-    if provider.get("tools") == False:
+    if provider.get("tools") is False:
         payload.pop("tools", None)
         payload.pop("tool_choice", None)
 
     return url, headers, payload
 
-import hashlib
-import hmac
-import datetime
-import urllib.parse
-from datetime import timezone
-
 def sign(key, msg):
     return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()
 
@@ -837,7 +839,7 @@ async def get_aws_payload(request, engine, provider, api_key=None):
     url = f"{base_url}/model/{original_model}/invoke-with-response-stream"
 
     messages = []
-    system_prompt = None
+    # system_prompt = None
     tool_id = None
     for msg in request.messages:
         tool_call_id = None
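The sign helper relocated just above get_aws_payload is the building block of AWS Signature Version 4 request signing. For reference, this is the standard SigV4 signing-key derivation chain; get_signature_key is an illustrative name and the snippet is not copied from the package:

# Standard AWS Signature Version 4 signing-key derivation (illustrative).
import hashlib
import hmac


def sign(key, msg):
    return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()


def get_signature_key(secret_key: str, date_stamp: str, region: str, service: str) -> bytes:
    # Each step HMACs the next scope component, keyed by the previous digest.
    k_date = sign(("AWS4" + secret_key).encode("utf-8"), date_stamp)
    k_region = sign(k_date, region)
    k_service = sign(k_region, service)
    return sign(k_service, "aws4_request")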
@@ -887,8 +889,8 @@ async def get_aws_payload(request, engine, provider, api_key=None):
             }]})
         elif msg.role != "system":
             messages.append({"role": msg.role, "content": content})
-        elif msg.role == "system":
-            system_prompt = content
+        # elif msg.role == "system":
+        #     system_prompt = content
 
     conversation_len = len(messages) - 1
     message_index = 0
@@ -971,7 +973,7 @@ async def get_aws_payload(request, engine, provider, api_key=None):
             "type": "any"
         }
 
-    if provider.get("tools") == False:
+    if provider.get("tools") is False:
         payload.pop("tools", None)
         payload.pop("tool_choice", None)
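Several payload builders in this release switch from "== False" to "is False" when checking the provider's tools flag. The distinction matters because "== False" is also true for 0 and other values that compare equal to False, while "is False" only matches an explicit false in the provider config; a missing key (None) keeps tools enabled either way. A quick standalone check:

# Only an explicit tools: false should strip tools from the payload.
provider = {"tools": False}
assert provider.get("tools") is False        # explicit opt-out
assert {}.get("tools") is not False          # key absent -> tools stay
assert (0 == False) and (0 is not False)     # "==" would wrongly match 0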
 
@@ -1079,7 +1081,7 @@ async def get_gpt_payload(request, engine, provider, api_key=None):
         else:
             payload[field] = value
 
-    if provider.get("tools") == False or "o1-mini" in original_model or "chatgpt-4o-latest" in original_model or "grok" in original_model:
+    if provider.get("tools") is False or "o1-mini" in original_model or "chatgpt-4o-latest" in original_model or "grok" in original_model:
         payload.pop("tools", None)
         payload.pop("tool_choice", None)
 
@@ -1241,7 +1243,7 @@ async def get_azure_payload(request, engine, provider, api_key=None):
         else:
             payload[field] = value
 
-    if provider.get("tools") == False or "o1" in original_model or "chatgpt-4o-latest" in original_model or "grok" in original_model:
+    if provider.get("tools") is False or "o1" in original_model or "chatgpt-4o-latest" in original_model or "grok" in original_model:
         payload.pop("tools", None)
         payload.pop("tool_choice", None)
 
@@ -1336,7 +1338,7 @@ async def get_azure_databricks_payload(request, engine, provider, api_key=None):
         else:
             payload[field] = value
 
-    if provider.get("tools") == False or "o1" in original_model or "chatgpt-4o-latest" in original_model or "grok" in original_model:
+    if provider.get("tools") is False or "o1" in original_model or "chatgpt-4o-latest" in original_model or "grok" in original_model:
         payload.pop("tools", None)
         payload.pop("tool_choice", None)
 
@@ -1626,7 +1628,7 @@ async def gpt2claude_tools_json(json_dict):
     for old_key, new_key in keys_to_change.items():
         if old_key in json_dict:
             if new_key:
-                if json_dict[old_key] == None:
+                if json_dict[old_key] is None:
                     json_dict[old_key] = {
                         "type": "object",
                         "properties": {}
@@ -1788,7 +1790,7 @@ async def get_claude_payload(request, engine, provider, api_key=None):
             "type": "any"
         }
 
-    if provider.get("tools") == False:
+    if provider.get("tools") is False:
         payload.pop("tools", None)
         payload.pop("tool_choice", None)
 
@@ -1868,8 +1870,6 @@ async def get_upload_certificate(client: httpx.AsyncClient, api_key: str, model:
         print(f"获取凭证时发生未知错误: {e}")
         return None
 
-from mimetypes import guess_type
-
 async def upload_file_to_oss(client: httpx.AsyncClient, certificate: dict, file: Tuple[str, IOBase, str]) -> str:
     """第二步:使用凭证将文件内容上传到OSS"""
     upload_host = certificate.get("upload_host")
{aient-1.2.49 → aient-1.2.50}/aient/core/utils.py
@@ -4,12 +4,16 @@ import ast
 import json
 import httpx
 import base64
+import random
+import string
 import asyncio
+import traceback
 from time import time
 from PIL import Image
 from fastapi import HTTPException
-from urllib.parse import urlparse
 from collections import defaultdict
+from httpx_socks import AsyncProxyTransport
+from urllib.parse import urlparse, urlunparse
 
 from .log_config import logger
 
@@ -33,7 +37,6 @@ class BaseAPI:
         if api_url == "":
             api_url = "https://api.openai.com/v1/chat/completions"
         self.source_api_url: str = api_url
-        from urllib.parse import urlparse, urlunparse
         parsed_url = urlparse(self.source_api_url)
         # print("parsed_url", parsed_url)
         if parsed_url.scheme == "":
@@ -152,7 +155,6 @@ def get_engine(provider, endpoint=None, original_model=""):
 
     return engine, stream
 
-from httpx_socks import AsyncProxyTransport
 def get_proxy(proxy, client_config = {}):
     if proxy:
         # 解析代理URL
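get_proxy now relies on the module-level httpx_socks import shown above. For context, this is roughly how an AsyncProxyTransport ends up in an httpx client configuration; the snippet is an assumed usage sketch, not the function's exact body:

# Assumed usage sketch: route an httpx.AsyncClient through a SOCKS proxy.
import httpx
from httpx_socks import AsyncProxyTransport

transport = AsyncProxyTransport.from_url("socks5://127.0.0.1:1080")  # hypothetical proxy URL
client = httpx.AsyncClient(transport=transport)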
@@ -223,9 +225,7 @@ async def update_initial_model(provider):
         models_id = list(set_models)
         # print(models_id)
         return models_id
-    except Exception as e:
-        # print("error:", e)
-        import traceback
+    except Exception:
         traceback.print_exc()
         return []
 
@@ -278,27 +278,31 @@ def parse_rate_limit(limit_string):
     return limits
 
 class ThreadSafeCircularList:
-    def __init__(self, items = [], rate_limit={"default": "999999/min"}, schedule_algorithm="round_robin"):
+    def __init__(self, items = [], rate_limit={"default": "999999/min"}, schedule_algorithm="round_robin", provider_name=None):
+        self.provider_name = provider_name
+        self.original_items = list(items)
+        self.schedule_algorithm = schedule_algorithm
+
         if schedule_algorithm == "random":
-            import random
             self.items = random.sample(items, len(items))
-            self.schedule_algorithm = "random"
         elif schedule_algorithm == "round_robin":
             self.items = items
-            self.schedule_algorithm = "round_robin"
         elif schedule_algorithm == "fixed_priority":
             self.items = items
-            self.schedule_algorithm = "fixed_priority"
+        elif schedule_algorithm == "smart_round_robin":
+            self.items = items
         else:
             self.items = items
-            logger.warning(f"Unknown schedule algorithm: {schedule_algorithm}, use (round_robin, random, fixed_priority) instead")
+            logger.warning(f"Unknown schedule algorithm: {schedule_algorithm}, use (round_robin, random, fixed_priority, smart_round_robin) instead")
             self.schedule_algorithm = "round_robin"
+
         self.index = 0
         self.lock = asyncio.Lock()
-        # 修改为二级字典,第一级是item,第二级是model
         self.requests = defaultdict(lambda: defaultdict(list))
         self.cooling_until = defaultdict(float)
         self.rate_limits = {}
+        self.reordering_task = None
+
         if isinstance(rate_limit, dict):
             for rate_limit_model, rate_limit_value in rate_limit.items():
                 self.rate_limits[rate_limit_model] = parse_rate_limit(rate_limit_value)
@@ -307,6 +311,38 @@ class ThreadSafeCircularList:
         else:
             logger.error(f"Error ThreadSafeCircularList: Unknown rate_limit type: {type(rate_limit)}, rate_limit: {rate_limit}")
 
+        if self.schedule_algorithm == "smart_round_robin":
+            logger.info(f"Initializing '{self.provider_name}' with 'smart_round_robin' algorithm.")
+            self._trigger_reorder()
+
+    async def reset_items(self, new_items: list):
+        """Safely replaces the current list of items with a new one."""
+        async with self.lock:
+            if self.items != new_items:
+                self.items = new_items
+                self.index = 0
+                logger.info(f"Provider '{self.provider_name}' API key list has been reset and reordered.")
+
+    def _trigger_reorder(self):
+        """Asynchronously triggers the reordering task if not already running."""
+        if self.provider_name and (self.reordering_task is None or self.reordering_task.done()):
+            logger.info(f"Triggering reorder for provider '{self.provider_name}'...")
+            try:
+                loop = asyncio.get_running_loop()
+                self.reordering_task = loop.create_task(self._reorder_keys())
+            except RuntimeError:
+                logger.warning(f"No running event loop to trigger reorder for '{self.provider_name}'.")
+
+    async def _reorder_keys(self):
+        """Performs the actual reordering logic."""
+        from utils import get_sorted_api_keys
+        try:
+            sorted_keys = await get_sorted_api_keys(self.provider_name, self.original_items, group_size=100)
+            if sorted_keys:
+                await self.reset_items(sorted_keys)
+        except Exception as e:
+            logger.error(f"Error during key reordering for provider '{self.provider_name}': {e}")
+
     async def set_cooling(self, item: str, cooling_time: int = 60):
         """设置某个 item 进入冷却状态
 
@@ -314,7 +350,7 @@ class ThreadSafeCircularList:
         item: 需要冷却的 item
         cooling_time: 冷却时间(秒),默认60秒
         """
-        if item == None:
+        if item is None:
             return
         now = time()
         async with self.lock:
@@ -374,6 +410,11 @@ class ThreadSafeCircularList:
         async with self.lock:
             if self.schedule_algorithm == "fixed_priority":
                 self.index = 0
+
+            # 检查是否即将完成一个循环,并据此触发重排序
+            if self.schedule_algorithm == "smart_round_robin" and len(self.items) > 0 and self.index == len(self.items) - 1:
+                self._trigger_reorder()
+
             start_index = self.index
             while True:
                 item = self.items[self.index]
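The hunks above add a smart_round_robin option to ThreadSafeCircularList: near the end of each pass over the key list, next() calls _trigger_reorder(), which schedules _reorder_keys() to fetch a freshly sorted key order via a get_sorted_api_keys helper imported from a top-level utils module (expected to be supplied by the consuming application). Below is a hedged usage sketch under those assumptions; the key values and provider name are made up:

# Assumed usage sketch for the new scheduling mode. Reordering only happens
# when a top-level utils.get_sorted_api_keys is importable at runtime;
# otherwise the reorder task fails and the original key order is kept.
import asyncio

from aient.core.utils import ThreadSafeCircularList


async def main():
    keys = ThreadSafeCircularList(
        items=["sk-key-1", "sk-key-2", "sk-key-3"],        # hypothetical keys
        rate_limit={"default": "60/min"},
        schedule_algorithm="smart_round_robin",
        provider_name="example-provider",                   # hypothetical name
    )
    for _ in range(3):
        print(await keys.next())                            # rotates round-robin


asyncio.run(main())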
@@ -384,7 +425,7 @@ class ThreadSafeCircularList:
 
             # 如果已经检查了所有的 API key 都被限制
             if self.index == start_index:
-                logger.warning(f"All API keys are rate limited!")
+                logger.warning("All API keys are rate limited!")
                 raise HTTPException(status_code=429, detail="Too many requests")
 
     async def is_tpr_exceeded(self, model: str = None, tokens: int = 0) -> bool:
@@ -488,7 +529,7 @@ c3o = ThreadSafeCircularList(["us-east5"])
 c4 = ThreadSafeCircularList(["us-east5", "us-central1", "europe-west4", "asia-southeast1"])
 c3h = ThreadSafeCircularList(["us-east5", "us-central1", "europe-west1", "europe-west4"])
 gemini1 = ThreadSafeCircularList(["us-central1", "us-east4", "us-west1", "us-west4", "europe-west1", "europe-west2"])
-gemini2 = ThreadSafeCircularList(["us-central1"])
+gemini_preview = ThreadSafeCircularList(["global"])
 # gemini2_5_pro_exp = ThreadSafeCircularList(["global"])
 gemini2_5_pro_exp = ThreadSafeCircularList([
     "us-central1",
@@ -515,8 +556,6 @@ end_of_line = "\n\n"
 # end_of_line = "\r"
 # end_of_line = "\n"
 
-import random
-import string
 async def generate_sse_response(timestamp, model, content=None, tools_id=None, function_call_name=None, function_call_content=None, role=None, total_tokens=0, prompt_tokens=0, completion_tokens=0, reasoning_content=None, stop=None):
     random.seed(timestamp)
     random_str = ''.join(random.choices(string.ascii_letters + string.digits, k=29))
@@ -654,7 +693,7 @@ def get_image_format(file_content: bytes):
     try:
         img = Image.open(io.BytesIO(file_content))
         return img.format.lower()
-    except:
+    except Exception:
         return None
 
 def encode_image(file_content: bytes):
{aient-1.2.49 → aient-1.2.50}/aient.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: aient
-Version: 1.2.49
+Version: 1.2.50
 Summary: Aient: The Awakening of Agent.
 Requires-Python: >=3.11
 Description-Content-Type: text/markdown
{aient-1.2.49 → aient-1.2.50}/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "aient"
-version = "1.2.49"
+version = "1.2.50"
 description = "Aient: The Awakening of Agent."
 readme = "README.md"
 requires-python = ">=3.11"