aient 1.1.71__tar.gz → 1.1.72__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. {aient-1.1.71 → aient-1.1.72}/PKG-INFO +1 -1
  2. {aient-1.1.71 → aient-1.1.72}/aient/models/chatgpt.py +34 -204
  3. {aient-1.1.71 → aient-1.1.72}/aient/plugins/config.py +1 -1
  4. {aient-1.1.71 → aient-1.1.72}/aient.egg-info/PKG-INFO +1 -1
  5. {aient-1.1.71 → aient-1.1.72}/pyproject.toml +1 -1
  6. {aient-1.1.71 → aient-1.1.72}/LICENSE +0 -0
  7. {aient-1.1.71 → aient-1.1.72}/README.md +0 -0
  8. {aient-1.1.71 → aient-1.1.72}/aient/__init__.py +0 -0
  9. {aient-1.1.71 → aient-1.1.72}/aient/core/__init__.py +0 -0
  10. {aient-1.1.71 → aient-1.1.72}/aient/core/log_config.py +0 -0
  11. {aient-1.1.71 → aient-1.1.72}/aient/core/models.py +0 -0
  12. {aient-1.1.71 → aient-1.1.72}/aient/core/request.py +0 -0
  13. {aient-1.1.71 → aient-1.1.72}/aient/core/response.py +0 -0
  14. {aient-1.1.71 → aient-1.1.72}/aient/core/test/test_base_api.py +0 -0
  15. {aient-1.1.71 → aient-1.1.72}/aient/core/test/test_geminimask.py +0 -0
  16. {aient-1.1.71 → aient-1.1.72}/aient/core/test/test_image.py +0 -0
  17. {aient-1.1.71 → aient-1.1.72}/aient/core/test/test_payload.py +0 -0
  18. {aient-1.1.71 → aient-1.1.72}/aient/core/utils.py +0 -0
  19. {aient-1.1.71 → aient-1.1.72}/aient/models/__init__.py +0 -0
  20. {aient-1.1.71 → aient-1.1.72}/aient/models/audio.py +0 -0
  21. {aient-1.1.71 → aient-1.1.72}/aient/models/base.py +0 -0
  22. {aient-1.1.71 → aient-1.1.72}/aient/plugins/__init__.py +0 -0
  23. {aient-1.1.71 → aient-1.1.72}/aient/plugins/arXiv.py +0 -0
  24. {aient-1.1.71 → aient-1.1.72}/aient/plugins/excute_command.py +0 -0
  25. {aient-1.1.71 → aient-1.1.72}/aient/plugins/get_time.py +0 -0
  26. {aient-1.1.71 → aient-1.1.72}/aient/plugins/image.py +0 -0
  27. {aient-1.1.71 → aient-1.1.72}/aient/plugins/list_directory.py +0 -0
  28. {aient-1.1.71 → aient-1.1.72}/aient/plugins/read_file.py +0 -0
  29. {aient-1.1.71 → aient-1.1.72}/aient/plugins/read_image.py +0 -0
  30. {aient-1.1.71 → aient-1.1.72}/aient/plugins/readonly.py +0 -0
  31. {aient-1.1.71 → aient-1.1.72}/aient/plugins/registry.py +0 -0
  32. {aient-1.1.71 → aient-1.1.72}/aient/plugins/run_python.py +0 -0
  33. {aient-1.1.71 → aient-1.1.72}/aient/plugins/websearch.py +0 -0
  34. {aient-1.1.71 → aient-1.1.72}/aient/plugins/write_file.py +0 -0
  35. {aient-1.1.71 → aient-1.1.72}/aient/utils/__init__.py +0 -0
  36. {aient-1.1.71 → aient-1.1.72}/aient/utils/prompt.py +0 -0
  37. {aient-1.1.71 → aient-1.1.72}/aient/utils/scripts.py +0 -0
  38. {aient-1.1.71 → aient-1.1.72}/aient.egg-info/SOURCES.txt +0 -0
  39. {aient-1.1.71 → aient-1.1.72}/aient.egg-info/dependency_links.txt +0 -0
  40. {aient-1.1.71 → aient-1.1.72}/aient.egg-info/requires.txt +0 -0
  41. {aient-1.1.71 → aient-1.1.72}/aient.egg-info/top_level.txt +0 -0
  42. {aient-1.1.71 → aient-1.1.72}/setup.cfg +0 -0
  43. {aient-1.1.71 → aient-1.1.72}/test/test_Web_crawler.py +0 -0
  44. {aient-1.1.71 → aient-1.1.72}/test/test_ddg_search.py +0 -0
  45. {aient-1.1.71 → aient-1.1.72}/test/test_google_search.py +0 -0
  46. {aient-1.1.71 → aient-1.1.72}/test/test_ollama.py +0 -0
  47. {aient-1.1.71 → aient-1.1.72}/test/test_plugin.py +0 -0
  48. {aient-1.1.71 → aient-1.1.72}/test/test_search.py +0 -0
  49. {aient-1.1.71 → aient-1.1.72}/test/test_url.py +0 -0
  50. {aient-1.1.71 → aient-1.1.72}/test/test_whisper.py +0 -0
  51. {aient-1.1.71 → aient-1.1.72}/test/test_yjh.py +0 -0

{aient-1.1.71 → aient-1.1.72}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: aient
-Version: 1.1.71
+Version: 1.1.72
 Summary: Aient: The Awakening of Agent.
 Requires-Python: >=3.11
 Description-Content-Type: text/markdown

{aient-1.1.71 → aient-1.1.72}/aient/models/chatgpt.py

@@ -6,10 +6,7 @@ import httpx
 import asyncio
 import logging
 import inspect
-from typing import Set
 from typing import Union, Optional, Callable
-from pathlib import Path
-
 
 from .base import BaseLLM
 from ..plugins.registry import registry
@@ -18,27 +15,6 @@ from ..utils.scripts import safe_get, async_generator_to_sync, parse_function_xm
 from ..core.request import prepare_request_payload
 from ..core.response import fetch_response_stream, fetch_response
 
-def get_filtered_keys_from_object(obj: object, *keys: str) -> Set[str]:
-    """
-    Get filtered list of object variable names.
-    :param keys: List of keys to include. If the first key is "not", the remaining keys will be removed from the class keys.
-    :return: List of class keys.
-    """
-    class_keys = obj.__dict__.keys()
-    if not keys:
-        return set(class_keys)
-
-    # Remove the passed keys from the class keys.
-    if keys[0] == "not":
-        return {key for key in class_keys if key not in keys[1:]}
-    # Check if all passed keys are valid
-    if invalid_keys := set(keys) - class_keys:
-        raise ValueError(
-            f"Invalid keys: {invalid_keys}",
-        )
-    # Only return specified keys that are in class_keys
-    return {key for key in keys if key in class_keys}
-
 class chatgpt(BaseLLM):
     """
     Official ChatGPT API
@@ -407,7 +383,7 @@ class chatgpt(BaseLLM):
 
             resp = json.loads(line) if isinstance(line, str) else line
             if "error" in resp:
-                raise Exception(f"{resp}")
+                raise Exception(json.dumps({"type": "api_error", "details": resp}, ensure_ascii=False))
 
             total_tokens = total_tokens or safe_get(resp, "usage", "total_tokens", default=0)
             delta = safe_get(resp, "choices", 0, "delta")
@@ -471,9 +447,9 @@ class chatgpt(BaseLLM):
         if self.check_done:
             # self.logger.info(f"worker Response: {full_response}")
             if not full_response.strip().endswith('[done]'):
-                raise Exception(f"Response is not ended with [done]: {full_response}")
+                raise Exception(json.dumps({"type": "validation_error", "message": "Response is not ended with [done]", "response": full_response}, ensure_ascii=False))
             elif not full_response.strip():
-                raise Exception(f"Response is empty")
+                raise Exception(json.dumps({"type": "response_empty_error", "message": "Response is empty"}, ensure_ascii=False))
             else:
                 full_response = full_response.strip().rstrip('[done]')
                 full_response = full_response.replace("<tool_code>", "").replace("</tool_code>", "")
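
Together with the api_error raise in the previous hunk, these exceptions now carry a JSON object with a "type" discriminator instead of a bare f-string. A minimal illustrative helper for code that consumes these errors; this helper is not part of the package:

import json

def structured_error_type(exc: Exception) -> str:
    # Returns e.g. "api_error", "validation_error", or "response_empty_error",
    # falling back to "unknown" when the message is not one of the JSON payloads above.
    try:
        payload = json.loads(str(exc))
    except json.JSONDecodeError:
        return "unknown"
    return payload.get("type", "unknown") if isinstance(payload, dict) else "unknown"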
@@ -537,8 +513,10 @@ class chatgpt(BaseLLM):
         # 处理函数调用
         if need_function_call and self.use_plugins == True:
             if self.print_log:
-                self.logger.info(f"function_parameter: {function_parameter}")
-                self.logger.info(f"function_full_response: {function_full_response}")
+                if function_parameter:
+                    self.logger.info(f"function_parameter: {function_parameter}")
+                else:
+                    self.logger.info(f"function_full_response: {function_full_response}")
 
             function_response = ""
             # 定义处理单个工具调用的辅助函数
@@ -553,17 +531,13 @@ class chatgpt(BaseLLM):
             tool_response = ""
             has_args = safe_get(self.function_call_list, tool_name, "parameters", "required", default=False)
             if self.function_calls_counter[tool_name] <= self.function_call_max_loop and (tool_args != "{}" or not has_args):
-                function_call_max_tokens = self.truncate_limit - 1000
-                if function_call_max_tokens <= 0:
-                    function_call_max_tokens = int(self.truncate_limit / 2)
                 if self.print_log:
-                    self.logger.info(f"function_call {tool_name}, max token: {function_call_max_tokens}")
+                    self.logger.info(f"Tool use, calling: {tool_name}")
 
                 # 处理函数调用结果
                 if is_async:
                     async for chunk in get_tools_result_async(
-                        tool_name, tool_args, function_call_max_tokens,
-                        model or self.engine, chatgpt, kwargs.get('api_key', self.api_key),
+                        tool_name, tool_args, model or self.engine, chatgpt, kwargs.get('api_key', self.api_key),
                         kwargs.get('api_url', self.api_url.chat_url), use_plugins=False, model=model or self.engine,
                         add_message=self.add_to_conversation, convo_id=convo_id, language=language
                     ):
@@ -571,8 +545,7 @@ class chatgpt(BaseLLM):
                 else:
                     async def run_async():
                         async for chunk in get_tools_result_async(
-                            tool_name, tool_args, function_call_max_tokens,
-                            model or self.engine, chatgpt, kwargs.get('api_key', self.api_key),
+                            tool_name, tool_args, model or self.engine, chatgpt, kwargs.get('api_key', self.api_key),
                             kwargs.get('api_url', self.api_url.chat_url), use_plugins=False, model=model or self.engine,
                             add_message=self.add_to_conversation, convo_id=convo_id, language=language
                         ):
@@ -638,8 +611,8 @@ class chatgpt(BaseLLM):
                 else:
                     all_responses.append(f"[{tool_name}({tool_args}) Result]:\n\n{tool_response}")
 
-                if self.check_done:
-                    all_responses.append("Your message **must** end with [done] to signify the end of your output.")
+                if self.check_done:
+                    all_responses.append("Your message **must** end with [done] to signify the end of your output.")
 
             # 合并所有工具响应
             function_response = "\n\n".join(all_responses).strip()
@@ -721,13 +694,17 @@ class chatgpt(BaseLLM):
 
         # 打印日志
         if self.print_log:
-            self.logger.info(f"api_url: {kwargs.get('api_url', self.api_url.chat_url)}, {url}")
-            self.logger.info(f"api_key: {kwargs.get('api_key', self.api_key)}")
+            self.logger.debug(f"api_url: {kwargs.get('api_url', self.api_url.chat_url)}")
+            self.logger.debug(f"api_key: {kwargs.get('api_key', self.api_key)}")
+        need_done_prompt = False
 
         # 发送请求并处理响应
         for i in range(30):
+            tmp_post_json = copy.deepcopy(json_post)
+            if need_done_prompt:
+                tmp_post_json["messages"].extend(need_done_prompt)
             if self.print_log:
-                replaced_text = json.loads(re.sub(r';base64,([A-Za-z0-9+/=]+)', ';base64,***', json.dumps(json_post)))
+                replaced_text = json.loads(re.sub(r';base64,([A-Za-z0-9+/=]+)', ';base64,***', json.dumps(tmp_post_json)))
                 replaced_text_str = json.dumps(replaced_text, indent=4, ensure_ascii=False)
                 self.logger.info(f"Request Body:\n{replaced_text_str}")
 
@@ -753,11 +730,11 @@ class chatgpt(BaseLLM):
             else:
                 if stream:
                     generator = fetch_response_stream(
-                        self.aclient, url, headers, json_post, engine_type, model or self.engine,
+                        self.aclient, url, headers, tmp_post_json, engine_type, model or self.engine,
                     )
                 else:
                     generator = fetch_response(
-                        self.aclient, url, headers, json_post, engine_type, model or self.engine,
+                        self.aclient, url, headers, tmp_post_json, engine_type, model or self.engine,
                     )
 
             # 处理正常响应
@@ -777,18 +754,24 @@ class chatgpt(BaseLLM):
             except httpx.RemoteProtocolError:
                 continue
             except Exception as e:
-                if "Response is" in str(e):
-                    self.logger.error(f"{e}")
+                self.logger.error(f"{e}")
+                if "validation_error" in str(e):
+                    bad_assistant_message = json.loads(str(e))["response"]
+                    need_done_prompt = [
+                        {"role": "assistant", "content": bad_assistant_message},
+                        {"role": "user", "content": "你的消息没有以[done]结尾,请重新输出"}
+                    ]
+                    continue
+                if "response_empty_error" in str(e):
                     continue
-                self.logger.error(f"发生了未预料的错误:{e}")
                 import traceback
                 self.logger.error(traceback.format_exc())
                 if "Invalid URL" in str(e):
-                    e = "您输入了无效的API URL,请使用正确的URL并使用`/start`命令重新设置API URL。具体错误如下:\n\n" + str(e)
-                    raise Exception(f"{e}")
+                    error_message = "您输入了无效的API URL,请使用正确的URL并使用`/start`命令重新设置API URL。具体错误如下:\n\n" + str(e)
+                    raise Exception(json.dumps({"type": "configuration_error", "message": error_message}, ensure_ascii=False))
                 # 最后一次重试失败,向上抛出异常
                 if i == 11:
-                    raise Exception(f"{e}")
+                    raise Exception(json.dumps({"type": "retry_failed", "message": str(e)}, ensure_ascii=False))
 
     def ask_stream(
         self,
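
This handler turns a structured validation_error into a corrective retry: the offending assistant output plus a follow-up user turn are replayed on the next loop iteration via need_done_prompt, while the request body is rebuilt from a deep copy each time so reminders never accumulate in json_post. A compact standalone sketch of the same pattern; send and the payload shape are placeholders, not aient APIs:

import copy
import json

def ask_with_done_retry(base_payload: dict, send, max_attempts: int = 30) -> str:
    # Placeholder pattern, not an aient API: send(payload) returns the model text and
    # raises exceptions whose str() is a JSON object like the structured errors above.
    extra_messages: list[dict] = []
    for _ in range(max_attempts):
        payload = copy.deepcopy(base_payload)        # keep the original request pristine
        payload["messages"].extend(extra_messages)   # replay corrective turns, if any
        try:
            return send(payload)
        except Exception as exc:
            try:
                err = json.loads(str(exc))
            except json.JSONDecodeError:
                err = None
            if not isinstance(err, dict):
                raise
            if err.get("type") == "validation_error":
                extra_messages = [
                    {"role": "assistant", "content": err.get("response", "")},
                    # "Your message did not end with [done], please answer again"
                    {"role": "user", "content": "你的消息没有以[done]结尾,请重新输出"},
                ]
                continue
            if err.get("type") == "response_empty_error":
                continue
            raise
    raise RuntimeError("all retry attempts failed")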
@@ -915,157 +898,4 @@ class chatgpt(BaseLLM):
             {"role": "system", "content": self.system_prompt},
         ]
         self.tokens_usage[convo_id] = 0
-        self.current_tokens[convo_id] = 0
-
-    def save(self, file: str, *keys: str) -> None:
-        """
-        Save the Chatbot configuration to a JSON file
-        """
-        with open(file, "w", encoding="utf-8") as f:
-            data = {
-                key: self.__dict__[key]
-                for key in get_filtered_keys_from_object(self, *keys)
-            }
-            # saves session.proxies dict as session
-            # leave this here for compatibility
-            data["session"] = data["proxy"]
-            del data["aclient"]
-            json.dump(
-                data,
-                f,
-                indent=2,
-            )
-
-    def load(self, file: Path, *keys_: str) -> None:
-        """
-        Load the Chatbot configuration from a JSON file
-        """
-        with open(file, encoding="utf-8") as f:
-            # load json, if session is in keys, load proxies
-            loaded_config = json.load(f)
-            keys = get_filtered_keys_from_object(self, *keys_)
-
-            if (
-                "session" in keys
-                and loaded_config["session"]
-                or "proxy" in keys
-                and loaded_config["proxy"]
-            ):
-                self.proxy = loaded_config.get("session", loaded_config["proxy"])
-                self.session = httpx.Client(
-                    follow_redirects=True,
-                    proxies=self.proxy,
-                    timeout=self.timeout,
-                    cookies=self.session.cookies,
-                    headers=self.session.headers,
-                )
-                self.aclient = httpx.AsyncClient(
-                    follow_redirects=True,
-                    proxies=self.proxy,
-                    timeout=self.timeout,
-                    cookies=self.session.cookies,
-                    headers=self.session.headers,
-                )
-            if "session" in keys:
-                keys.remove("session")
-            if "aclient" in keys:
-                keys.remove("aclient")
-            self.__dict__.update({key: loaded_config[key] for key in keys})
-
-    def _handle_response_error_common(self, response_text, json_post):
-        """通用的响应错误处理逻辑,适用于同步和异步场景"""
-        try:
-            # 检查内容审核失败
-            if "Content did not pass the moral check" in response_text:
-                return json_post, False, f"内容未通过道德检查:{response_text[:400]}"
-
-            # 处理函数调用相关错误
-            if "function calling" in response_text:
-                if "tools" in json_post:
-                    del json_post["tools"]
-                if "tool_choice" in json_post:
-                    del json_post["tool_choice"]
-                return json_post, True, None
-
-            # 处理请求格式错误
-            elif "invalid_request_error" in response_text:
-                for index, mess in enumerate(json_post["messages"]):
-                    if type(mess["content"]) == list and "text" in mess["content"][0]:
-                        json_post["messages"][index] = {
-                            "role": mess["role"],
-                            "content": mess["content"][0]["text"]
-                        }
-                return json_post, True, None
-
-            # 处理角色不允许错误
-            elif "'function' is not an allowed role" in response_text:
-                if json_post["messages"][-1]["role"] == "tool":
-                    mess = json_post["messages"][-1]
-                    json_post["messages"][-1] = {
-                        "role": "assistant",
-                        "name": mess["name"],
-                        "content": mess["content"]
-                    }
-                return json_post, True, None
-
-            # 处理服务器繁忙错误
-            elif "Sorry, server is busy" in response_text:
-                for index, mess in enumerate(json_post["messages"]):
-                    if type(mess["content"]) == list and "text" in mess["content"][0]:
-                        json_post["messages"][index] = {
-                            "role": mess["role"],
-                            "content": mess["content"][0]["text"]
-                        }
-                return json_post, True, None
-
-            # 处理token超限错误
-            elif "is not possible because the prompts occupy" in response_text:
-                max_tokens = re.findall(r"only\s(\d+)\stokens", response_text)
-                if max_tokens:
-                    json_post["max_tokens"] = int(max_tokens[0])
-                return json_post, True, None
-
-            # 默认移除工具相关设置
-            else:
-                if "tools" in json_post:
-                    del json_post["tools"]
-                if "tool_choice" in json_post:
-                    del json_post["tool_choice"]
-                return json_post, True, None
-
-        except Exception as e:
-            self.logger.error(f"处理响应错误时出现异常: {e}")
-            return json_post, False, str(e)
-
-    def _handle_response_error_sync(self, response, json_post):
-        """处理API响应错误并相应地修改请求体(同步版本)"""
-        response_text = response.text
-
-        # 处理空响应
-        if response.status_code == 200 and response_text == "":
-            for index, mess in enumerate(json_post["messages"]):
-                if type(mess["content"]) == list and "text" in mess["content"][0]:
-                    json_post["messages"][index] = {
-                        "role": mess["role"],
-                        "content": mess["content"][0]["text"]
-                    }
-            return json_post, True
-
-        json_post, should_retry, error_msg = self._handle_response_error_common(response_text, json_post)
-
-        if error_msg:
-            raise Exception(f"{response.status_code} {response.reason} {error_msg}")
-
-        return json_post, should_retry
-
-    async def _handle_response_error(self, response, json_post):
-        """处理API响应错误并相应地修改请求体(异步版本)"""
-        await response.aread()
-        response_text = response.text
-
-        json_post, should_retry, error_msg = self._handle_response_error_common(response_text, json_post)
-
-        if error_msg:
-            raise Exception(f"{response.status_code} {response.reason_phrase} {error_msg}")
-
-        return json_post, should_retry
+        self.current_tokens[convo_id] = 0

{aient-1.1.71 → aient-1.1.72}/aient/plugins/config.py

@@ -5,7 +5,7 @@ import inspect
 from .registry import registry
 from ..utils.prompt import search_key_word_prompt
 
-async def get_tools_result_async(function_call_name, function_full_response, function_call_max_tokens, engine, robot, api_key, api_url, use_plugins, model, add_message, convo_id, language):
+async def get_tools_result_async(function_call_name, function_full_response, engine, robot, api_key, api_url, use_plugins, model, add_message, convo_id, language):
     function_response = ""
     function_to_call = None
     call_args = json.loads(function_full_response)
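
With function_call_max_tokens dropped, callers pass the engine name directly after the serialized tool arguments, matching the updated call sites in chatgpt.py above. A minimal sketch of the new call shape; the credentials, URL, and constructor arguments are placeholders, and the "get_time" tool name is assumed from the plugin filename:

import asyncio

from aient.models.chatgpt import chatgpt
from aient.plugins.config import get_tools_result_async

async def demo() -> None:
    bot = chatgpt(api_key="sk-placeholder", engine="gpt-4o-mini")  # placeholder setup
    async for chunk in get_tools_result_async(
        "get_time", "{}",                      # tool name and JSON-encoded arguments
        bot.engine, chatgpt,                   # engine string and robot class, as in chatgpt.py
        "sk-placeholder", "https://api.openai.com/v1/chat/completions",
        use_plugins=False, model=bot.engine,
        add_message=bot.add_to_conversation, convo_id="default", language="English",
    ):
        print(chunk, end="")

if __name__ == "__main__":
    asyncio.run(demo())  # performs a real request; needs a valid api_key and api_url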

{aient-1.1.71 → aient-1.1.72}/aient.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: aient
-Version: 1.1.71
+Version: 1.1.72
 Summary: Aient: The Awakening of Agent.
 Requires-Python: >=3.11
 Description-Content-Type: text/markdown

{aient-1.1.71 → aient-1.1.72}/pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "aient"
-version = "1.1.71"
+version = "1.1.72"
 description = "Aient: The Awakening of Agent."
 readme = "README.md"
 requires-python = ">=3.11"